Source: git.proxmox.com — mirror_ubuntu-jammy-kernel.git, blob net/bluetooth/mgmt.c
Commit: "Bluetooth: Use intervals and tx power from mgmt cmds"
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
44
/* All management opcodes that a trusted management socket may issue.
 * The array order defines the order in which the opcodes are
 * serialized into the Read Management Commands response.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
};
128
/* All events that may be delivered to a trusted management socket.
 * The array order defines the order in which the event codes are
 * serialized into the Read Management Commands response.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
171
/* Read-only subset of opcodes that untrusted (non-privileged)
 * management sockets are allowed to issue.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
184
/* Subset of events that may be delivered to untrusted management
 * sockets (no security-sensitive material such as keys).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
201
202 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
203
204 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
205 "\x00\x00\x00\x00\x00\x00\x00\x00"
206
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; each comment names the HCI
 * error the entry translates. Codes beyond the end of the table are
 * mapped by mgmt_status() to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
271
272 static u8 mgmt_status(u8 hci_status)
273 {
274 if (hci_status < ARRAY_SIZE(mgmt_status_table))
275 return mgmt_status_table[hci_status];
276
277 return MGMT_STATUS_FAILED;
278 }
279
/* Send an index-related event on the HCI control channel to every
 * management socket that has @flag set. No socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
286
/* Send an event on the HCI control channel to sockets that have @flag
 * set, skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
293
/* Broadcast an event on the HCI control channel to all trusted
 * management sockets, skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
300
301 static u8 le_addr_type(u8 mgmt_addr_type)
302 {
303 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
304 return ADDR_LE_DEV_PUBLIC;
305 else
306 return ADDR_LE_DEV_RANDOM;
307 }
308
/* Fill in the mgmt interface version and revision. @ver must point to
 * a buffer of at least sizeof(struct mgmt_rp_read_version) bytes; the
 * revision is stored little-endian as on the wire.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
316
/* Handle MGMT_OP_READ_VERSION: report the mgmt interface version.
 * Controller-independent, hence MGMT_INDEX_NONE in the reply.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
329
330 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
331 u16 data_len)
332 {
333 struct mgmt_rp_read_commands *rp;
334 u16 num_commands, num_events;
335 size_t rp_size;
336 int i, err;
337
338 bt_dev_dbg(hdev, "sock %p", sk);
339
340 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
341 num_commands = ARRAY_SIZE(mgmt_commands);
342 num_events = ARRAY_SIZE(mgmt_events);
343 } else {
344 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
345 num_events = ARRAY_SIZE(mgmt_untrusted_events);
346 }
347
348 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
349
350 rp = kmalloc(rp_size, GFP_KERNEL);
351 if (!rp)
352 return -ENOMEM;
353
354 rp->num_commands = cpu_to_le16(num_commands);
355 rp->num_events = cpu_to_le16(num_events);
356
357 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
358 __le16 *opcode = rp->opcodes;
359
360 for (i = 0; i < num_commands; i++, opcode++)
361 put_unaligned_le16(mgmt_commands[i], opcode);
362
363 for (i = 0; i < num_events; i++, opcode++)
364 put_unaligned_le16(mgmt_events[i], opcode);
365 } else {
366 __le16 *opcode = rp->opcodes;
367
368 for (i = 0; i < num_commands; i++, opcode++)
369 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
370
371 for (i = 0; i < num_events; i++, opcode++)
372 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
373 }
374
375 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
376 rp, rp_size);
377 kfree(rp);
378
379 return err;
380 }
381
382 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
383 u16 data_len)
384 {
385 struct mgmt_rp_read_index_list *rp;
386 struct hci_dev *d;
387 size_t rp_len;
388 u16 count;
389 int err;
390
391 bt_dev_dbg(hdev, "sock %p", sk);
392
393 read_lock(&hci_dev_list_lock);
394
395 count = 0;
396 list_for_each_entry(d, &hci_dev_list, list) {
397 if (d->dev_type == HCI_PRIMARY &&
398 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
399 count++;
400 }
401
402 rp_len = sizeof(*rp) + (2 * count);
403 rp = kmalloc(rp_len, GFP_ATOMIC);
404 if (!rp) {
405 read_unlock(&hci_dev_list_lock);
406 return -ENOMEM;
407 }
408
409 count = 0;
410 list_for_each_entry(d, &hci_dev_list, list) {
411 if (hci_dev_test_flag(d, HCI_SETUP) ||
412 hci_dev_test_flag(d, HCI_CONFIG) ||
413 hci_dev_test_flag(d, HCI_USER_CHANNEL))
414 continue;
415
416 /* Devices marked as raw-only are neither configured
417 * nor unconfigured controllers.
418 */
419 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
420 continue;
421
422 if (d->dev_type == HCI_PRIMARY &&
423 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
424 rp->index[count++] = cpu_to_le16(d->id);
425 bt_dev_dbg(hdev, "Added hci%u", d->id);
426 }
427 }
428
429 rp->num_controllers = cpu_to_le16(count);
430 rp_len = sizeof(*rp) + (2 * count);
431
432 read_unlock(&hci_dev_list_lock);
433
434 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
435 0, rp, rp_len);
436
437 kfree(rp);
438
439 return err;
440 }
441
442 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
443 void *data, u16 data_len)
444 {
445 struct mgmt_rp_read_unconf_index_list *rp;
446 struct hci_dev *d;
447 size_t rp_len;
448 u16 count;
449 int err;
450
451 bt_dev_dbg(hdev, "sock %p", sk);
452
453 read_lock(&hci_dev_list_lock);
454
455 count = 0;
456 list_for_each_entry(d, &hci_dev_list, list) {
457 if (d->dev_type == HCI_PRIMARY &&
458 hci_dev_test_flag(d, HCI_UNCONFIGURED))
459 count++;
460 }
461
462 rp_len = sizeof(*rp) + (2 * count);
463 rp = kmalloc(rp_len, GFP_ATOMIC);
464 if (!rp) {
465 read_unlock(&hci_dev_list_lock);
466 return -ENOMEM;
467 }
468
469 count = 0;
470 list_for_each_entry(d, &hci_dev_list, list) {
471 if (hci_dev_test_flag(d, HCI_SETUP) ||
472 hci_dev_test_flag(d, HCI_CONFIG) ||
473 hci_dev_test_flag(d, HCI_USER_CHANNEL))
474 continue;
475
476 /* Devices marked as raw-only are neither configured
477 * nor unconfigured controllers.
478 */
479 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
480 continue;
481
482 if (d->dev_type == HCI_PRIMARY &&
483 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
484 rp->index[count++] = cpu_to_le16(d->id);
485 bt_dev_dbg(hdev, "Added hci%u", d->id);
486 }
487 }
488
489 rp->num_controllers = cpu_to_le16(count);
490 rp_len = sizeof(*rp) + (2 * count);
491
492 read_unlock(&hci_dev_list_lock);
493
494 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
495 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
496
497 kfree(rp);
498
499 return err;
500 }
501
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report all controllers (primary
 * and AMP) together with a per-entry type and bus. Entry types:
 * 0x00 = configured primary, 0x01 = unconfigured primary, 0x02 = AMP.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for buffer sizing. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* struct_size() handles the flexible entry[] array with
	 * overflow checking.
	 */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the entries, skipping controllers in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
575
576 static bool is_configured(struct hci_dev *hdev)
577 {
578 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
579 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
580 return false;
581
582 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
583 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
584 !bacmp(&hdev->public_addr, BDADDR_ANY))
585 return false;
586
587 return true;
588 }
589
590 static __le32 get_missing_options(struct hci_dev *hdev)
591 {
592 u32 options = 0;
593
594 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
595 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
596 options |= MGMT_OPTION_EXTERNAL_CONFIG;
597
598 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
599 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
600 !bacmp(&hdev->public_addr, BDADDR_ANY))
601 options |= MGMT_OPTION_PUBLIC_ADDRESS;
602
603 return cpu_to_le32(options);
604 }
605
/* Notify sockets subscribed to option events that the set of missing
 * configuration options changed, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
613
/* Reply to @opcode with the current missing-options bitmask. */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
621
622 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
623 void *data, u16 data_len)
624 {
625 struct mgmt_rp_read_config_info rp;
626 u32 options = 0;
627
628 bt_dev_dbg(hdev, "sock %p", sk);
629
630 hci_dev_lock(hdev);
631
632 memset(&rp, 0, sizeof(rp));
633 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
634
635 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
636 options |= MGMT_OPTION_EXTERNAL_CONFIG;
637
638 if (hdev->set_bdaddr)
639 options |= MGMT_OPTION_PUBLIC_ADDRESS;
640
641 rp.supported_options = cpu_to_le32(options);
642 rp.missing_options = get_missing_options(hdev);
643
644 hci_dev_unlock(hdev);
645
646 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
647 &rp, sizeof(rp));
648 }
649
/* Build the bitmask of PHYs this controller is capable of, derived
 * from the LMP feature bits (BR/EDR) and LE features page 1 (LE).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always available on BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates are nested: 2M is the base capability and
		 * 3M additionally requires its own feature bit.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is always available when LE is supported. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
701
/* Build the bitmask of PHYs currently selected. For BR/EDR the EDR
 * bits in hdev->pkt_type are "shall not be used" bits, so a CLEAR
 * HCI_2DHx/HCI_3DHx bit means that packet type IS selected; for LE
 * the default TX/RX PHY masks are used directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* Note the inverted test: for EDR a set bit in
			 * pkt_type disables the packet type.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
764
765 static u32 get_configurable_phys(struct hci_dev *hdev)
766 {
767 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
768 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
769 }
770
/* Build the bitmask of mgmt settings this controller could support,
 * based on its capabilities (as opposed to get_current_settings(),
 * which reports what is enabled right now).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs kernel support
			 * compiled in.
			 */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
822
/* Build the bitmask of mgmt settings that are currently active on the
 * controller, derived from the hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
893
/* Look up a pending mgmt command with @opcode for @hdev on the
 * control channel; returns NULL when none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
898
/* Like pending_find(), but additionally matches the command's user
 * data pointer against @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
905
906 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
907 {
908 struct mgmt_pending_cmd *cmd;
909
910 /* If there's a pending mgmt command the flags will not yet have
911 * their final values, so check for this first.
912 */
913 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
914 if (cmd) {
915 struct mgmt_mode *cp = cmd->param;
916 if (cp->val == 0x01)
917 return LE_AD_GENERAL;
918 else if (cp->val == 0x02)
919 return LE_AD_LIMITED;
920 } else {
921 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
922 return LE_AD_LIMITED;
923 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
924 return LE_AD_GENERAL;
925 }
926
927 return 0;
928 }
929
930 bool mgmt_get_connectable(struct hci_dev *hdev)
931 {
932 struct mgmt_pending_cmd *cmd;
933
934 /* If there's a pending mgmt command the flag will not yet have
935 * it's final value, so check for this first.
936 */
937 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
938 if (cmd) {
939 struct mgmt_mode *cp = cmd->param;
940
941 return cp->val;
942 }
943
944 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
945 }
946
/* Delayed-work callback: once the service cache period ends, push the
 * (possibly batched) EIR and class-of-device updates to the
 * controller. Does nothing if the cache flag was already cleared.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the queued HCI commands; no completion callback needed. */
	hci_req_run(&req, NULL);
}
967
/* Delayed-work callback: the resolvable private address lifetime has
 * elapsed. Mark the RPA expired and, if advertising is active,
 * restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless advertising is running. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
992
/* One-time mgmt initialization for a controller. Runs only on the
 * first mgmt access (the HCI_MGMT flag is test-and-set).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1008
/* Handle MGMT_OP_READ_INFO: report the controller's address, version,
 * manufacturer, class of device, names, and its supported/current
 * settings bitmasks.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1038
1039 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1040 {
1041 u16 eir_len = 0;
1042 size_t name_len;
1043
1044 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1045 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1046 hdev->dev_class, 3);
1047
1048 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1049 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1050 hdev->appearance);
1051
1052 name_len = strlen(hdev->dev_name);
1053 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1054 hdev->dev_name, name_len);
1055
1056 name_len = strlen(hdev->short_name);
1057 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1058 hdev->short_name, name_len);
1059
1060 return eir_len;
1061 }
1062
/* Handle MGMT_OP_READ_EXT_INFO: like Read Info but with the variable
 * EIR blob appended. The 512-byte stack buffer holds the fixed reply
 * header plus the EIR data.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1102
/* Broadcast an Extended Controller Information Changed event to all
 * sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	/* Rebuild the current EIR snapshot for the event payload */
	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1118
/* Complete a settings-changing mgmt command by replying with the
 * controller's current settings bitmask (little endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1126
/* HCI request completion callback for clean_up_hci_state(): once no
 * connections remain, run the power-off work immediately instead of
 * waiting for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1136
/* Emit an Advertising Added event for @instance to all mgmt sockets
 * except @sk (the originator already got a command response).
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
1145
/* Emit an Advertising Removed event for @instance to all mgmt sockets
 * except @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1155
/* Cancel a pending advertising-instance expiry, if one is armed, and
 * clear the recorded timeout so it is not re-armed by mistake.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1163
/* Build and run a single HCI request that quiesces the controller
 * before power-off: disable page/inquiry scan, remove advertising
 * instances, stop advertising and discovery, and abort every active
 * connection. Returns the hci_req_run() result (-ENODATA means nothing
 * needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (NULL/0x00 == all of them) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1197
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 * Powering up is deferred to the power_on work item; powering down
 * first quiesces the controller via clean_up_hci_state() and then
 * schedules the delayed power_off work. The command completes later
 * from the power state machine via the pending command added here.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1252
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1260
/* Public wrapper used by the HCI core to broadcast a New Settings
 * event to every subscribed socket (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1265
/* Shared context for mgmt_pending_foreach() callbacks: remembers the
 * first responder socket (sk) so a single New Settings event can be
 * sent afterwards, plus the device and a status to propagate.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1271
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, detach it from the pending list and free
 * it. The first command's socket is stashed (with a reference) in the
 * cmd_lookup so the caller can send New Settings while skipping it.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Hold the socket; the caller drops it via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1287
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1295
/* mgmt_pending_foreach() callback: complete a pending command using
 * its own cmd_complete handler when one is set, otherwise fall back
 * to a plain status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1309
/* cmd_complete handler that echoes the command's full original
 * parameters back in the Command Complete response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1315
/* cmd_complete handler that echoes only the leading mgmt_addr_info
 * portion of the command parameters back in the response.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1321
1322 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1323 {
1324 if (!lmp_bredr_capable(hdev))
1325 return MGMT_STATUS_NOT_SUPPORTED;
1326 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1327 return MGMT_STATUS_REJECTED;
1328 else
1329 return MGMT_STATUS_SUCCESS;
1330 }
1331
1332 static u8 mgmt_le_support(struct hci_dev *hdev)
1333 {
1334 if (!lmp_le_capable(hdev))
1335 return MGMT_STATUS_NOT_SUPPORTED;
1336 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1337 return MGMT_STATUS_REJECTED;
1338 else
1339 return MGMT_STATUS_SUCCESS;
1340 }
1341
/* Completion hook for the discoverable-update work: answer the pending
 * Set Discoverable command, arm the discoverable timeout if one was
 * requested, and broadcast the new settings. On failure the limited
 * discoverable flag set by set_discoverable() is rolled back.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable timeout only now that the mode change
	 * actually succeeded.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1376
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which requires
 * a timeout). Validates parameters and state, handles the powered-off
 * and no-HCI-needed fast paths inline, and otherwise defers the actual
 * mode change to the discoverable_update work item; the command is
 * completed later by mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1506
/* Completion hook for the connectable-update work: answer the pending
 * Set Connectable command with either an error status or the current
 * settings, then broadcast the new settings to everyone else.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1534
/* Powered-off fast path for Set Connectable: just flip the flags
 * (clearing discoverable too when going non-connectable), reply with
 * the settings, and if anything changed refresh scanning state and
 * broadcast New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1563
/* Handler for MGMT_OP_SET_CONNECTABLE. Takes the powered-off fast
 * path via set_connectable_update_settings(); otherwise updates the
 * flags (disabling discoverable and its timeout when going
 * non-connectable) and defers the HCI work to the connectable_update
 * work item, completed later by mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Going non-connectable also ends discoverable mode,
		 * so stop any pending discoverable timeout first.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1620
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * Purely a host-side setting, so no HCI traffic is needed; in limited
 * privacy mode a change may require re-generating the advertising
 * address, which is kicked off via the discoverable_update work.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1663
/* Handler for MGMT_OP_SET_LINK_SECURITY (BR/EDR authentication).
 * When powered off only the flag is toggled; when powered on the
 * HCI Write Authentication Enable command is sent and the mgmt
 * command completes from its command-complete handler via the
 * pending entry added here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1732
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). When powered
 * off only the flags change (disabling SSP also clears High Speed,
 * which depends on it); when powered on the HCI Write Simple Pairing
 * Mode command is sent and the mgmt command completes later via the
 * pending entry.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS requires SSP, so clear it too; "changed" is
			 * true if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1813
/* Handler for MGMT_OP_SET_HS (High Speed / AMP). A host-only flag:
 * no HCI traffic is generated. Requires CONFIG_BT_HS, BR/EDR and SSP;
 * disabling HS while powered on is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SSP change could invalidate the SSP check above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1874
/* HCI request completion callback for set_le(): answer all pending
 * Set LE commands (with an error status, or with the new settings),
 * broadcast New Settings, and if LE ended up enabled refresh the
 * default advertising/scan-response data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1921
/* Handler for MGMT_OP_SET_LE: enable or disable LE support. LE-only
 * controllers cannot have LE switched off. The powered-off or
 * no-change case is handled host-side; otherwise an HCI Write LE Host
 * Supported request is built (also tearing down advertising when
 * disabling) and the command completes via le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Tear down any running advertising before disabling LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2028
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* Caller holds hdev lock; scan the pending command list for any
	 * opcode that may touch Class of Device or EIR data.
	 */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2051
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases that
 * share these first 12 bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2056
2057 static u8 get_uuid_size(const u8 *uuid)
2058 {
2059 u32 val;
2060
2061 if (memcmp(uuid, bluetooth_base_uuid, 12))
2062 return 128;
2063
2064 val = get_unaligned_le32(&uuid[12]);
2065 if (val > 0xffff)
2066 return 32;
2067
2068 return 16;
2069 }
2070
/* Common completion for the class/EIR-updating commands (Add UUID,
 * Remove UUID, Set Device Class): answer the pending command for
 * @mgmt_op with the resulting device class and drop the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2089
/* HCI request completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2096
/* Handler for MGMT_OP_ADD_UUID: record a service UUID and refresh the
 * Class of Device and EIR data. If the HCI request has nothing to send
 * (-ENODATA) the command completes immediately; otherwise it completes
 * from add_uuid_complete() via the pending entry.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2154
/* Start the service cache timer if the device is powered and the
 * cache is not already active. Returns true only when this call
 * actually armed the cache (so the caller can skip the CoD/EIR
 * update until the timer fires).
 */
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}
2168
/* HCI request completion callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2175
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID — or all of
 * them when the all-zero wildcard UUID is given — and refresh Class of
 * Device and EIR. Completes immediately when no HCI traffic is needed
 * (-ENODATA or the service cache absorbed the update), otherwise via
 * remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: drop every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2254
/* HCI request completion callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2261
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor Class of Device.
 *
 * The low two bits of minor and the high three bits of major are
 * rejected as invalid (see the 0x03/0xe0 masks below).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Device class only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* If powered off, just store the values; no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* The lock is dropped around cancel_delayed_work_sync(),
		 * presumably because the service_cache work item takes
		 * hci_dev_lock() itself — NOTE(review): confirm against the
		 * work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing needed to be sent to the controller */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply is deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2332
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the list supplied by userspace and update the keep-debug-keys
 * policy flag.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the total command size within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only if the flag actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys on the device's blocked-key list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2421
2422 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2423 u8 addr_type, struct sock *skip_sk)
2424 {
2425 struct mgmt_ev_device_unpaired ev;
2426
2427 bacpy(&ev.addr.bdaddr, bdaddr);
2428 ev.addr.type = addr_type;
2429
2430 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2431 skip_sk);
2432 }
2433
/* MGMT_OP_UNPAIR_DEVICE handler: remove the pairing data (link key for
 * BR/EDR, SMP keys for LE) for a remote device and optionally terminate
 * an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag; anything else is invalid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* Failure here means no link key was stored → not paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2561
/* MGMT_OP_DISCONNECT handler: terminate an existing ACL or LE link to
 * the given address. The reply is deferred until the disconnection
 * completes (via the pending command's cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states count as "not connected" */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2627
2628 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2629 {
2630 switch (link_type) {
2631 case LE_LINK:
2632 switch (addr_type) {
2633 case ADDR_LE_DEV_PUBLIC:
2634 return BDADDR_LE_PUBLIC;
2635
2636 default:
2637 /* Fallback to LE Random address type */
2638 return BDADDR_LE_RANDOM;
2639 }
2640
2641 default:
2642 /* Fallback to BR/EDR type */
2643 return BDADDR_BREDR;
2644 }
2645 }
2646
2647 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2648 u16 data_len)
2649 {
2650 struct mgmt_rp_get_connections *rp;
2651 struct hci_conn *c;
2652 int err;
2653 u16 i;
2654
2655 bt_dev_dbg(hdev, "sock %p", sk);
2656
2657 hci_dev_lock(hdev);
2658
2659 if (!hdev_is_powered(hdev)) {
2660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2661 MGMT_STATUS_NOT_POWERED);
2662 goto unlock;
2663 }
2664
2665 i = 0;
2666 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2667 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2668 i++;
2669 }
2670
2671 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2672 if (!rp) {
2673 err = -ENOMEM;
2674 goto unlock;
2675 }
2676
2677 i = 0;
2678 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2679 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2680 continue;
2681 bacpy(&rp->addr[i].bdaddr, &c->dst);
2682 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2683 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2684 continue;
2685 i++;
2686 }
2687
2688 rp->conn_count = cpu_to_le16(i);
2689
2690 /* Recalculate length in case of filtered SCO connections, etc */
2691 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2692 struct_size(rp, addr, i));
2693
2694 kfree(rp);
2695
2696 unlock:
2697 hci_dev_unlock(hdev);
2698 return err;
2699 }
2700
/* Queue a pending PIN Code Neg Reply command and send the corresponding
 * HCI command to the controller. The pending command is removed again if
 * sending fails.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI negative reply only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2721
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; otherwise reject the
	 * pairing with a negative reply and fail the mgmt command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		/* Only report INVALID_PARAMS if the neg reply went out */
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2783
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairing attempts. Always succeeds for valid values.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2806
2807 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2808 {
2809 struct hci_dev *hdev = conn->hdev;
2810 struct mgmt_pending_cmd *cmd;
2811
2812 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2813 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2814 continue;
2815
2816 if (cmd->user_data != conn)
2817 continue;
2818
2819 return cmd;
2820 }
2821
2822 return NULL;
2823 }
2824
/* Finish a Pair Device command with @status: send the reply, detach the
 * pairing callbacks from the connection and release the references held
 * on it.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
2853
2854 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2855 {
2856 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2857 struct mgmt_pending_cmd *cmd;
2858
2859 cmd = find_pairing(conn);
2860 if (cmd) {
2861 cmd->cmd_complete(cmd, status);
2862 mgmt_pending_remove(cmd);
2863 }
2864 }
2865
/* Connection callback used for BR/EDR pairings: complete the pending
 * Pair Device command with the (mapped) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2881
/* Connection callback used for LE pairings. Unlike the BR/EDR variant,
 * success (status == 0) is ignored here: for LE, completion is signalled
 * separately via mgmt_smp_complete(), so only failures are handled.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2900
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing (dedicated bonding) with
 * a remote BR/EDR or LE device. The reply is deferred and delivered via
 * pairing_complete() once the pairing finishes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect errno to a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect callback already installed means another pairing is
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3031
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command, remove any keys created so far and tear down a link that was
 * only created for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pending Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3088
/* Common handler for the user confirmation/passkey (negative) reply
 * commands. For LE addresses the reply is routed through SMP and
 * completed synchronously; for BR/EDR the reply is sent to the
 * controller as the given HCI command (@hci_op) and completed later.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: hand the reply to SMP and complete the command right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3159
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3171
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the exact command length
 * before delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3187
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3199
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper around
 * user_pairing_resp(), forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3211
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3223
/* Restart the current advertising instance if it includes any of the
 * given @flags (e.g. local name or appearance), so that a changed value
 * gets picked up in the advertising data.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	/* Reschedule the instance, forcing a data refresh */
	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3252
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: deliver
 * the deferred mgmt reply and refresh advertising if the name is
 * included in the current instance.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* If advertising carries the local name, re-advertise it */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3284
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short
 * local name. When powered, the change is pushed to the controller and
 * the reply deferred to set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify listeners */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3354
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value and
 * refresh advertising if the appearance is part of the current instance.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		/* If advertising carries the appearance, re-advertise it */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3388
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3409
/* Emit the PHY Configuration Changed mgmt event to every listener
 * except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	/* memset (not just member assignment) so no stack bytes leak */
	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3421
/* Completion callback for the HCI LE Set Default PHY request issued by
 * set_phy_configuration(). Resolves the pending mgmt command with the
 * translated HCI status and, on success, broadcasts the PHY
 * configuration change to other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed);
	 * in that case there is nobody to answer.
	 */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone except the originator, who already got
		 * the command-complete reply above.
		 */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3452
/* Set PHY Configuration command handler.
 *
 * Validates the requested PHY selection against the supported and
 * configurable masks, translates the BR/EDR part into an ACL packet
 * type mask applied directly to hdev->pkt_type, and translates the LE
 * part into an HCI LE Set Default PHY request that completes in
 * set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	/* Basic 1M 1-slot packets are always enabled */
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to change: reply success immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one Set PHY Configuration may be in flight at a time */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY bits to ACL packet types. Note that the
	 * multi-slot basic-rate bits are positive enables while the EDR
	 * bits (HCI_2DHx/HCI_3DHx) are "shall not be used" bits, hence
	 * the inverted logic below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed:
	 * report the change (if any) and complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bit 0/1 tell the controller we have no TX/RX
	 * preference respectively.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3607
3608 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3609 u16 len)
3610 {
3611 int err = MGMT_STATUS_SUCCESS;
3612 struct mgmt_cp_set_blocked_keys *keys = data;
3613 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3614 sizeof(struct mgmt_blocked_key_info));
3615 u16 key_count, expected_len;
3616 int i;
3617
3618 bt_dev_dbg(hdev, "sock %p", sk);
3619
3620 key_count = __le16_to_cpu(keys->key_count);
3621 if (key_count > max_key_count) {
3622 bt_dev_err(hdev, "too big key_count value %u", key_count);
3623 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3624 MGMT_STATUS_INVALID_PARAMS);
3625 }
3626
3627 expected_len = struct_size(keys, keys, key_count);
3628 if (expected_len != len) {
3629 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3630 expected_len, len);
3631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3632 MGMT_STATUS_INVALID_PARAMS);
3633 }
3634
3635 hci_dev_lock(hdev);
3636
3637 hci_blocked_keys_clear(hdev);
3638
3639 for (i = 0; i < keys->key_count; ++i) {
3640 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3641
3642 if (!b) {
3643 err = MGMT_STATUS_NO_RESOURCES;
3644 break;
3645 }
3646
3647 b->type = keys->keys[i].type;
3648 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3649 list_add_rcu(&b->list, &hdev->blocked_keys);
3650 }
3651 hci_dev_unlock(hdev);
3652
3653 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3654 err, NULL, 0);
3655 }
3656
3657 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3658 void *data, u16 len)
3659 {
3660 struct mgmt_mode *cp = data;
3661 int err;
3662 bool changed = false;
3663
3664 bt_dev_dbg(hdev, "sock %p", sk);
3665
3666 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3667 return mgmt_cmd_status(sk, hdev->id,
3668 MGMT_OP_SET_WIDEBAND_SPEECH,
3669 MGMT_STATUS_NOT_SUPPORTED);
3670
3671 if (cp->val != 0x00 && cp->val != 0x01)
3672 return mgmt_cmd_status(sk, hdev->id,
3673 MGMT_OP_SET_WIDEBAND_SPEECH,
3674 MGMT_STATUS_INVALID_PARAMS);
3675
3676 hci_dev_lock(hdev);
3677
3678 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
3679 err = mgmt_cmd_status(sk, hdev->id,
3680 MGMT_OP_SET_WIDEBAND_SPEECH,
3681 MGMT_STATUS_BUSY);
3682 goto unlock;
3683 }
3684
3685 if (hdev_is_powered(hdev) &&
3686 !!cp->val != hci_dev_test_flag(hdev,
3687 HCI_WIDEBAND_SPEECH_ENABLED)) {
3688 err = mgmt_cmd_status(sk, hdev->id,
3689 MGMT_OP_SET_WIDEBAND_SPEECH,
3690 MGMT_STATUS_REJECTED);
3691 goto unlock;
3692 }
3693
3694 if (cp->val)
3695 changed = !hci_dev_test_and_set_flag(hdev,
3696 HCI_WIDEBAND_SPEECH_ENABLED);
3697 else
3698 changed = hci_dev_test_and_clear_flag(hdev,
3699 HCI_WIDEBAND_SPEECH_ENABLED);
3700
3701 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3702 if (err < 0)
3703 goto unlock;
3704
3705 if (changed)
3706 err = new_settings(hdev, sk);
3707
3708 unlock:
3709 hci_dev_unlock(hdev);
3710 return err;
3711 }
3712
/* Read Security Info command handler.
 *
 * Builds a TLV-style (EIR-formatted) list of security capabilities:
 *   0x01 flags byte, 0x02 controller max encryption key size (only if
 *   Read Simple Pairing Options is supported), 0x03 SMP max key size.
 *
 * The 16-byte stack buffer is sized for the worst case: 2 bytes of
 * reply header plus 3 + 4 + 4 bytes of EIR data = 13 bytes.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3761
/* Experimental feature UUIDs. The byte arrays are stored reversed
 * (little-endian wire order) relative to the canonical UUID string in
 * the comment above each table.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3781
/* Read Experimental Features Information command handler.
 *
 * Lists up to three experimental features with their flags: the debug
 * feature (non-controller index only, when CONFIG_BT_FEATURE_DEBUG),
 * simultaneous central/peripheral support, and LL privacy (RPA
 * resolution). Also subscribes the socket to future feature-change
 * events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	/* Reply header plus 3 entries of 16-byte UUID + 4-byte flags;
	 * keep in sync with the number of features appended below.
	 */
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0) set when the controller reports valid LE states
		 * allowing it to be central and peripheral at the same time.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0): enabled, BIT(1): toggling changes settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3840
3841 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3842 struct sock *skip)
3843 {
3844 struct mgmt_ev_exp_feature_changed ev;
3845
3846 memset(&ev, 0, sizeof(ev));
3847 memcpy(ev.uuid, rpa_resolution_uuid, 16);
3848 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3849
3850 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3851 &ev, sizeof(ev),
3852 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3853
3854 }
3855
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify mgmt sockets that subscribed to experimental-feature events
 * (except @skip) that the debug feature was toggled.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
	memcpy(ev.uuid, debug_uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3870
/* Set Experimental Feature command handler.
 *
 * Dispatches on the UUID in the command: the all-zero UUID disables
 * every experimental feature, the debug UUID toggles kernel Bluetooth
 * debugging (non-controller index only), and the RPA resolution UUID
 * toggles LL privacy (controller index, powered off only). Unknown
 * UUIDs are rejected with NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* All-zero UUID: switch every experimental feature off */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		/* LL privacy can only be cleared while powered off */
		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			/* Advertising is not compatible with LL privacy */
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4021
/* Bitmask covering every currently defined per-device flag bit */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4023
4024 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4025 u16 data_len)
4026 {
4027 struct mgmt_cp_get_device_flags *cp = data;
4028 struct mgmt_rp_get_device_flags rp;
4029 struct bdaddr_list_with_flags *br_params;
4030 struct hci_conn_params *params;
4031 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4032 u32 current_flags = 0;
4033 u8 status = MGMT_STATUS_INVALID_PARAMS;
4034
4035 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4036 &cp->addr.bdaddr, cp->addr.type);
4037
4038 hci_dev_lock(hdev);
4039
4040 if (cp->addr.type == BDADDR_BREDR) {
4041 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4042 &cp->addr.bdaddr,
4043 cp->addr.type);
4044 if (!br_params)
4045 goto done;
4046
4047 current_flags = br_params->current_flags;
4048 } else {
4049 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4050 le_addr_type(cp->addr.type));
4051
4052 if (!params)
4053 goto done;
4054
4055 current_flags = params->current_flags;
4056 }
4057
4058 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4059 rp.addr.type = cp->addr.type;
4060 rp.supported_flags = cpu_to_le32(supported_flags);
4061 rp.current_flags = cpu_to_le32(current_flags);
4062
4063 status = MGMT_STATUS_SUCCESS;
4064
4065 done:
4066 hci_dev_unlock(hdev);
4067
4068 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4069 &rp, sizeof(rp));
4070 }
4071
4072 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4073 bdaddr_t *bdaddr, u8 bdaddr_type,
4074 u32 supported_flags, u32 current_flags)
4075 {
4076 struct mgmt_ev_device_flags_changed ev;
4077
4078 bacpy(&ev.addr.bdaddr, bdaddr);
4079 ev.addr.type = bdaddr_type;
4080 ev.supported_flags = cpu_to_le32(supported_flags);
4081 ev.current_flags = cpu_to_le32(current_flags);
4082
4083 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4084 }
4085
4086 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4087 u16 len)
4088 {
4089 struct mgmt_cp_set_device_flags *cp = data;
4090 struct bdaddr_list_with_flags *br_params;
4091 struct hci_conn_params *params;
4092 u8 status = MGMT_STATUS_INVALID_PARAMS;
4093 u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4094 u32 current_flags = __le32_to_cpu(cp->current_flags);
4095
4096 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4097 &cp->addr.bdaddr, cp->addr.type,
4098 __le32_to_cpu(current_flags));
4099
4100 if ((supported_flags | current_flags) != supported_flags) {
4101 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4102 current_flags, supported_flags);
4103 goto done;
4104 }
4105
4106 hci_dev_lock(hdev);
4107
4108 if (cp->addr.type == BDADDR_BREDR) {
4109 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4110 &cp->addr.bdaddr,
4111 cp->addr.type);
4112
4113 if (br_params) {
4114 br_params->current_flags = current_flags;
4115 status = MGMT_STATUS_SUCCESS;
4116 } else {
4117 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4118 &cp->addr.bdaddr, cp->addr.type);
4119 }
4120 } else {
4121 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4122 le_addr_type(cp->addr.type));
4123 if (params) {
4124 params->current_flags = current_flags;
4125 status = MGMT_STATUS_SUCCESS;
4126 } else {
4127 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4128 &cp->addr.bdaddr,
4129 le_addr_type(cp->addr.type));
4130 }
4131 }
4132
4133 done:
4134 hci_dev_unlock(hdev);
4135
4136 if (status == MGMT_STATUS_SUCCESS)
4137 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4138 supported_flags, current_flags);
4139
4140 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4141 &cp->addr, sizeof(cp->addr));
4142 }
4143
4144 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4145 u16 handle)
4146 {
4147 struct mgmt_ev_adv_monitor_added ev;
4148
4149 ev.monitor_handle = cpu_to_le16(handle);
4150
4151 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4152 }
4153
4154 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4155 u16 handle)
4156 {
4157 struct mgmt_ev_adv_monitor_added ev;
4158
4159 ev.monitor_handle = cpu_to_le16(handle);
4160
4161 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4162 }
4163
/* Read Advertisement Monitor Features command handler.
 *
 * Replies with the supported monitor features and the list of handles
 * of all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u16 num_handles = 0;
	/* Fixed-size scratch list; hci_add_adv_monitor() enforces the
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES limit, so the idr cannot hold
	 * more entries than fit here — TODO confirm against hci core.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Collect the handles under the lock, reply after dropping it */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
		handles[num_handles++] = monitor->handle;
	}

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Once controller-based monitoring is in place, the enabled_features
	 * should reflect the use.
	 */
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = 0;
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4212
4213 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4214 void *data, u16 len)
4215 {
4216 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4217 struct mgmt_rp_add_adv_patterns_monitor rp;
4218 struct adv_monitor *m = NULL;
4219 struct adv_pattern *p = NULL;
4220 unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4221 __u8 cp_ofst = 0, cp_len = 0;
4222 int err, i;
4223
4224 BT_DBG("request for %s", hdev->name);
4225
4226 if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4227 err = mgmt_cmd_status(sk, hdev->id,
4228 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4229 MGMT_STATUS_INVALID_PARAMS);
4230 goto failed;
4231 }
4232
4233 m = kmalloc(sizeof(*m), GFP_KERNEL);
4234 if (!m) {
4235 err = -ENOMEM;
4236 goto failed;
4237 }
4238
4239 INIT_LIST_HEAD(&m->patterns);
4240 m->active = false;
4241
4242 for (i = 0; i < cp->pattern_count; i++) {
4243 if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4244 err = mgmt_cmd_status(sk, hdev->id,
4245 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4246 MGMT_STATUS_INVALID_PARAMS);
4247 goto failed;
4248 }
4249
4250 cp_ofst = cp->patterns[i].offset;
4251 cp_len = cp->patterns[i].length;
4252 if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4253 cp_len > HCI_MAX_AD_LENGTH ||
4254 (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4255 err = mgmt_cmd_status(sk, hdev->id,
4256 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4257 MGMT_STATUS_INVALID_PARAMS);
4258 goto failed;
4259 }
4260
4261 p = kmalloc(sizeof(*p), GFP_KERNEL);
4262 if (!p) {
4263 err = -ENOMEM;
4264 goto failed;
4265 }
4266
4267 p->ad_type = cp->patterns[i].ad_type;
4268 p->offset = cp->patterns[i].offset;
4269 p->length = cp->patterns[i].length;
4270 memcpy(p->value, cp->patterns[i].value, p->length);
4271
4272 INIT_LIST_HEAD(&p->list);
4273 list_add(&p->list, &m->patterns);
4274 }
4275
4276 if (mp_cnt != cp->pattern_count) {
4277 err = mgmt_cmd_status(sk, hdev->id,
4278 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4279 MGMT_STATUS_INVALID_PARAMS);
4280 goto failed;
4281 }
4282
4283 hci_dev_lock(hdev);
4284
4285 prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4286
4287 err = hci_add_adv_monitor(hdev, m);
4288 if (err) {
4289 if (err == -ENOSPC) {
4290 mgmt_cmd_status(sk, hdev->id,
4291 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4292 MGMT_STATUS_NO_RESOURCES);
4293 }
4294 goto unlock;
4295 }
4296
4297 if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4298 mgmt_adv_monitor_added(sk, hdev, m->handle);
4299
4300 hci_dev_unlock(hdev);
4301
4302 rp.monitor_handle = cpu_to_le16(m->handle);
4303
4304 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4305 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4306
4307 unlock:
4308 hci_dev_unlock(hdev);
4309
4310 failed:
4311 hci_free_adv_monitor(m);
4312 return err;
4313 }
4314
/* Remove Advertisement Monitor command handler.
 *
 * Unregisters the monitor identified by the supplied handle and emits
 * the Monitor Removed event when the registration count dropped.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int prev_adv_monitors_cnt;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);
	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	/* NOTE(review): only -ENOENT is mapped to a mgmt error; any
	 * other negative return would fall through to the success reply
	 * below. Presumably hci_remove_adv_monitor() returns only 0 or
	 * -ENOENT — confirm against hci_core.
	 */
	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	/* Echo the handle back in wire (little-endian) order */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4352
/* Completion callback for the HCI Read Local OOB (Ext) Data request
 * issued by read_local_oob_data(). Parses either the legacy reply
 * (P-192 only) or the extended reply (P-192 + P-256) and forwards the
 * hashes/randomizers to the pending mgmt command's socket.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply has no P-256 values: truncate the mgmt
		 * reply so those fields are not sent at all.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4411
/* Read Local OOB Data command handler.
 *
 * Queues the appropriate HCI read command (extended variant when
 * BR/EDR Secure Connections is enabled) and completes asynchronously
 * in read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is a Secure Simple Pairing feature */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4462
/* Add Remote OOB Data command handler.
 *
 * Accepts two command sizes: the legacy size carrying only P-192
 * hash/randomizer (BR/EDR only), and the extended size carrying P-192
 * and P-256 values. All-zero hash/randomizer pairs mean "no OOB data
 * for that curve".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy P-192-only data is a BR/EDR concept */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4570
4571 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4572 void *data, u16 len)
4573 {
4574 struct mgmt_cp_remove_remote_oob_data *cp = data;
4575 u8 status;
4576 int err;
4577
4578 bt_dev_dbg(hdev, "sock %p", sk);
4579
4580 if (cp->addr.type != BDADDR_BREDR)
4581 return mgmt_cmd_complete(sk, hdev->id,
4582 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4583 MGMT_STATUS_INVALID_PARAMS,
4584 &cp->addr, sizeof(cp->addr));
4585
4586 hci_dev_lock(hdev);
4587
4588 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4589 hci_remote_oob_data_clear(hdev);
4590 status = MGMT_STATUS_SUCCESS;
4591 goto done;
4592 }
4593
4594 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4595 if (err < 0)
4596 status = MGMT_STATUS_INVALID_PARAMS;
4597 else
4598 status = MGMT_STATUS_SUCCESS;
4599
4600 done:
4601 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4602 status, &cp->addr, sizeof(cp->addr));
4603
4604 hci_dev_unlock(hdev);
4605 return err;
4606 }
4607
4608 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4609 {
4610 struct mgmt_pending_cmd *cmd;
4611
4612 bt_dev_dbg(hdev, "status %d", status);
4613
4614 hci_dev_lock(hdev);
4615
4616 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4617 if (!cmd)
4618 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4619
4620 if (!cmd)
4621 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4622
4623 if (cmd) {
4624 cmd->cmd_complete(cmd, mgmt_status(status));
4625 mgmt_pending_remove(cmd);
4626 }
4627
4628 hci_dev_unlock(hdev);
4629
4630 /* Handle suspend notifier */
4631 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4632 hdev->suspend_tasks)) {
4633 bt_dev_dbg(hdev, "Unpaused discovery");
4634 wake_up(&hdev->suspend_wait_q);
4635 }
4636 }
4637
4638 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4639 uint8_t *mgmt_status)
4640 {
4641 switch (type) {
4642 case DISCOV_TYPE_LE:
4643 *mgmt_status = mgmt_le_support(hdev);
4644 if (*mgmt_status)
4645 return false;
4646 break;
4647 case DISCOV_TYPE_INTERLEAVED:
4648 *mgmt_status = mgmt_le_support(hdev);
4649 if (*mgmt_status)
4650 return false;
4651 fallthrough;
4652 case DISCOV_TYPE_BREDR:
4653 *mgmt_status = mgmt_bredr_support(hdev);
4654 if (*mgmt_status)
4655 return false;
4656 break;
4657 default:
4658 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4659 return false;
4660 }
4661
4662 return true;
4663 }
4664
/* Common implementation behind Start Discovery and Start Limited
 * Discovery.  Validates controller and discovery state, records the
 * requested discovery parameters and queues the actual HCI work on
 * the request workqueue; completion is reported asynchronously via
 * mgmt_start_discovery_complete().
 *
 * Returns 0 on success or a negative errno; user space is answered
 * through mgmt_cmd_complete() in either case (except -ENOMEM).
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
                                    u16 op, void *data, u16 len)
{
    struct mgmt_cp_start_discovery *cp = data;
    struct mgmt_pending_cmd *cmd;
    u8 status;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    /* Discovery requires a powered controller. */
    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id, op,
                                MGMT_STATUS_NOT_POWERED,
                                &cp->type, sizeof(cp->type));
        goto failed;
    }

    /* Reject if discovery is already running or a periodic inquiry
     * is in progress.
     */
    if (hdev->discovery.state != DISCOVERY_STOPPED ||
        hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
        err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
                                &cp->type, sizeof(cp->type));
        goto failed;
    }

    if (!discovery_type_is_valid(hdev, cp->type, &status)) {
        err = mgmt_cmd_complete(sk, hdev->id, op, status,
                                &cp->type, sizeof(cp->type));
        goto failed;
    }

    /* Can't start discovery when it is paused */
    if (hdev->discovery_paused) {
        err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
                                &cp->type, sizeof(cp->type));
        goto failed;
    }

    /* Clear the discovery filter first to free any previously
     * allocated memory for the UUID list.
     */
    hci_discovery_filter_clear(hdev);

    hdev->discovery.type = cp->type;
    hdev->discovery.report_invalid_rssi = false;
    /* Only the limited variant restricts results to devices in
     * limited discoverable mode.
     */
    if (op == MGMT_OP_START_LIMITED_DISCOVERY)
        hdev->discovery.limited = true;
    else
        hdev->discovery.limited = false;

    cmd = mgmt_pending_add(sk, op, hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto failed;
    }

    cmd->cmd_complete = generic_cmd_complete;

    /* The actual scanning work runs asynchronously from the request
     * workqueue.
     */
    hci_discovery_set_state(hdev, DISCOVERY_STARTING);
    queue_work(hdev->req_workqueue, &hdev->discov_update);
    err = 0;

failed:
    hci_dev_unlock(hdev);
    return err;
}
4732
/* Start Discovery command handler: regular (unlimited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
    return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
                                    data, len);
}
4739
/* Start Limited Discovery command handler: discovery restricted to
 * devices in limited discoverable mode (see start_discovery_internal).
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
                                   void *data, u16 len)
{
    return start_discovery_internal(sk, hdev,
                                    MGMT_OP_START_LIMITED_DISCOVERY,
                                    data, len);
}
4747
/* Completion callback for Start Service Discovery: replies echoing
 * only the first byte of the stored command parameters.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
                                          u8 status)
{
    return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
                             cmd->param, 1);
}
4754
/* Start Service Discovery command handler.
 *
 * Like Start Discovery but with result filtering: only devices
 * matching the supplied RSSI threshold and/or one of the 128-bit
 * service UUIDs are reported.  The command carries a variable-length
 * UUID list, so the overall length must be validated against
 * uuid_count before the list is used.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
                                   void *data, u16 len)
{
    struct mgmt_cp_start_service_discovery *cp = data;
    struct mgmt_pending_cmd *cmd;
    /* Largest count for which sizeof(*cp) + uuid_count * 16 still
     * fits in the u16 length field (prevents overflow below).
     */
    const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
    u16 uuid_count, expected_len;
    u8 status;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id,
                                MGMT_OP_START_SERVICE_DISCOVERY,
                                MGMT_STATUS_NOT_POWERED,
                                &cp->type, sizeof(cp->type));
        goto failed;
    }

    /* Only one discovery operation may be active at a time. */
    if (hdev->discovery.state != DISCOVERY_STOPPED ||
        hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
        err = mgmt_cmd_complete(sk, hdev->id,
                                MGMT_OP_START_SERVICE_DISCOVERY,
                                MGMT_STATUS_BUSY, &cp->type,
                                sizeof(cp->type));
        goto failed;
    }

    uuid_count = __le16_to_cpu(cp->uuid_count);
    if (uuid_count > max_uuid_count) {
        bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
                   uuid_count);
        err = mgmt_cmd_complete(sk, hdev->id,
                                MGMT_OP_START_SERVICE_DISCOVERY,
                                MGMT_STATUS_INVALID_PARAMS, &cp->type,
                                sizeof(cp->type));
        goto failed;
    }

    /* The total command length must exactly match the fixed header
     * plus 16 bytes per UUID.
     */
    expected_len = sizeof(*cp) + uuid_count * 16;
    if (expected_len != len) {
        bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
                   expected_len, len);
        err = mgmt_cmd_complete(sk, hdev->id,
                                MGMT_OP_START_SERVICE_DISCOVERY,
                                MGMT_STATUS_INVALID_PARAMS, &cp->type,
                                sizeof(cp->type));
        goto failed;
    }

    if (!discovery_type_is_valid(hdev, cp->type, &status)) {
        err = mgmt_cmd_complete(sk, hdev->id,
                                MGMT_OP_START_SERVICE_DISCOVERY,
                                status, &cp->type, sizeof(cp->type));
        goto failed;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
                           hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto failed;
    }

    cmd->cmd_complete = service_discovery_cmd_complete;

    /* Clear the discovery filter first to free any previously
     * allocated memory for the UUID list.
     */
    hci_discovery_filter_clear(hdev);

    hdev->discovery.result_filtering = true;
    hdev->discovery.type = cp->type;
    hdev->discovery.rssi = cp->rssi;
    hdev->discovery.uuid_count = uuid_count;

    if (uuid_count > 0) {
        /* Keep a private copy of the UUID filter list; it is freed
         * again by hci_discovery_filter_clear().
         */
        hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
                                        GFP_KERNEL);
        if (!hdev->discovery.uuids) {
            err = mgmt_cmd_complete(sk, hdev->id,
                                    MGMT_OP_START_SERVICE_DISCOVERY,
                                    MGMT_STATUS_FAILED,
                                    &cp->type, sizeof(cp->type));
            mgmt_pending_remove(cmd);
            goto failed;
        }
    }

    hci_discovery_set_state(hdev, DISCOVERY_STARTING);
    queue_work(hdev->req_workqueue, &hdev->discov_update);
    err = 0;

failed:
    hci_dev_unlock(hdev);
    return err;
}
4855
4856 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4857 {
4858 struct mgmt_pending_cmd *cmd;
4859
4860 bt_dev_dbg(hdev, "status %d", status);
4861
4862 hci_dev_lock(hdev);
4863
4864 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4865 if (cmd) {
4866 cmd->cmd_complete(cmd, mgmt_status(status));
4867 mgmt_pending_remove(cmd);
4868 }
4869
4870 hci_dev_unlock(hdev);
4871
4872 /* Handle suspend notifier */
4873 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4874 bt_dev_dbg(hdev, "Paused discovery");
4875 wake_up(&hdev->suspend_wait_q);
4876 }
4877 }
4878
/* Stop Discovery command handler.
 *
 * The supplied type must match the type of the currently running
 * discovery.  Stopping is asynchronous and is completed from
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
    struct mgmt_cp_stop_discovery *mgmt_cp = data;
    struct mgmt_pending_cmd *cmd;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    /* Nothing to stop when no discovery is active. */
    if (!hci_discovery_active(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                MGMT_STATUS_REJECTED, &mgmt_cp->type,
                                sizeof(mgmt_cp->type));
        goto unlock;
    }

    /* The type must match the one used to start discovery. */
    if (hdev->discovery.type != mgmt_cp->type) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                MGMT_STATUS_INVALID_PARAMS,
                                &mgmt_cp->type, sizeof(mgmt_cp->type));
        goto unlock;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    cmd->cmd_complete = generic_cmd_complete;

    /* The actual stopping happens asynchronously on the request
     * workqueue.
     */
    hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
    queue_work(hdev->req_workqueue, &hdev->discov_update);
    err = 0;

unlock:
    hci_dev_unlock(hdev);
    return err;
}
4920
/* Confirm Name command handler.
 *
 * User space indicates whether the name of a discovered device is
 * already known; devices with unknown names are queued for remote
 * name resolution via the inquiry cache.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 len)
{
    struct mgmt_cp_confirm_name *cp = data;
    struct inquiry_entry *e;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    /* Name confirmation only makes sense while discovery runs. */
    if (!hci_discovery_active(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
                                MGMT_STATUS_FAILED, &cp->addr,
                                sizeof(cp->addr));
        goto failed;
    }

    /* Look up the cache entry whose name state is still unresolved. */
    e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
    if (!e) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
                                MGMT_STATUS_INVALID_PARAMS, &cp->addr,
                                sizeof(cp->addr));
        goto failed;
    }

    if (cp->name_known) {
        /* Known name: drop the entry from the resolve list. */
        e->name_state = NAME_KNOWN;
        list_del(&e->list);
    } else {
        /* Unknown name: keep it queued for name resolution. */
        e->name_state = NAME_NEEDED;
        hci_inquiry_cache_update_resolve(hdev, e);
    }

    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
                            &cp->addr, sizeof(cp->addr));

failed:
    hci_dev_unlock(hdev);
    return err;
}
4962
4963 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4964 u16 len)
4965 {
4966 struct mgmt_cp_block_device *cp = data;
4967 u8 status;
4968 int err;
4969
4970 bt_dev_dbg(hdev, "sock %p", sk);
4971
4972 if (!bdaddr_type_is_valid(cp->addr.type))
4973 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4974 MGMT_STATUS_INVALID_PARAMS,
4975 &cp->addr, sizeof(cp->addr));
4976
4977 hci_dev_lock(hdev);
4978
4979 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4980 cp->addr.type);
4981 if (err < 0) {
4982 status = MGMT_STATUS_FAILED;
4983 goto done;
4984 }
4985
4986 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4987 sk);
4988 status = MGMT_STATUS_SUCCESS;
4989
4990 done:
4991 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4992 &cp->addr, sizeof(cp->addr));
4993
4994 hci_dev_unlock(hdev);
4995
4996 return err;
4997 }
4998
4999 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5000 u16 len)
5001 {
5002 struct mgmt_cp_unblock_device *cp = data;
5003 u8 status;
5004 int err;
5005
5006 bt_dev_dbg(hdev, "sock %p", sk);
5007
5008 if (!bdaddr_type_is_valid(cp->addr.type))
5009 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5010 MGMT_STATUS_INVALID_PARAMS,
5011 &cp->addr, sizeof(cp->addr));
5012
5013 hci_dev_lock(hdev);
5014
5015 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5016 cp->addr.type);
5017 if (err < 0) {
5018 status = MGMT_STATUS_INVALID_PARAMS;
5019 goto done;
5020 }
5021
5022 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5023 sk);
5024 status = MGMT_STATUS_SUCCESS;
5025
5026 done:
5027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5028 &cp->addr, sizeof(cp->addr));
5029
5030 hci_dev_unlock(hdev);
5031
5032 return err;
5033 }
5034
5035 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5036 u16 len)
5037 {
5038 struct mgmt_cp_set_device_id *cp = data;
5039 struct hci_request req;
5040 int err;
5041 __u16 source;
5042
5043 bt_dev_dbg(hdev, "sock %p", sk);
5044
5045 source = __le16_to_cpu(cp->source);
5046
5047 if (source > 0x0002)
5048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5049 MGMT_STATUS_INVALID_PARAMS);
5050
5051 hci_dev_lock(hdev);
5052
5053 hdev->devid_source = source;
5054 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5055 hdev->devid_product = __le16_to_cpu(cp->product);
5056 hdev->devid_version = __le16_to_cpu(cp->version);
5057
5058 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5059 NULL, 0);
5060
5061 hci_req_init(&req, hdev);
5062 __hci_req_update_eir(&req);
5063 hci_req_run(&req, NULL);
5064
5065 hci_dev_unlock(hdev);
5066
5067 return err;
5068 }
5069
/* Request completion callback used when re-enabling instance
 * advertising; the status is only logged.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
                                        u16 opcode)
{
    bt_dev_dbg(hdev, "status %d", status);
}
5075
/* HCI request callback for Set Advertising.
 *
 * Syncs the HCI_ADVERTISING flag with the controller state, answers
 * all pending Set Advertising commands, and — when the global setting
 * was just disabled while advertising instances exist — re-schedules
 * instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
                                     u16 opcode)
{
    struct cmd_lookup match = { NULL, hdev };
    struct hci_request req;
    u8 instance;
    struct adv_info *adv_instance;
    int err;

    hci_dev_lock(hdev);

    if (status) {
        u8 mgmt_err = mgmt_status(status);

        /* Fail every pending Set Advertising command. */
        mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
                             cmd_status_rsp, &mgmt_err);
        goto unlock;
    }

    /* Mirror the controller's actual advertising state. */
    if (hci_dev_test_flag(hdev, HCI_LE_ADV))
        hci_dev_set_flag(hdev, HCI_ADVERTISING);
    else
        hci_dev_clear_flag(hdev, HCI_ADVERTISING);

    mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
                         &match);

    new_settings(hdev, match.sk);

    if (match.sk)
        sock_put(match.sk);

    /* Handle suspend notifier */
    if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
                           hdev->suspend_tasks)) {
        bt_dev_dbg(hdev, "Paused advertising");
        wake_up(&hdev->suspend_wait_q);
    } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                  hdev->suspend_tasks)) {
        bt_dev_dbg(hdev, "Unpaused advertising");
        wake_up(&hdev->suspend_wait_q);
    }

    /* If "Set Advertising" was just disabled and instance advertising was
     * set up earlier, then re-enable multi-instance advertising.
     */
    if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
        list_empty(&hdev->adv_instances))
        goto unlock;

    /* Resume with the current instance, falling back to the first
     * configured one when no instance is current.
     */
    instance = hdev->cur_adv_instance;
    if (!instance) {
        adv_instance = list_first_entry_or_null(&hdev->adv_instances,
                                                struct adv_info, list);
        if (!adv_instance)
            goto unlock;

        instance = adv_instance->instance;
    }

    hci_req_init(&req, hdev);

    err = __hci_req_schedule_adv_instance(&req, instance, true);

    if (!err)
        err = hci_req_run(&req, enable_advertising_instance);

    if (err)
        bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
    hci_dev_unlock(hdev);
}
5149
/* Set Advertising command handler.
 *
 * val 0x00 disables advertising, 0x01 enables it, and 0x02 enables
 * connectable advertising.  Depending on the current state this
 * either answers directly from the cached flags or issues HCI
 * commands and completes asynchronously via
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
{
    struct mgmt_mode *cp = data;
    struct mgmt_pending_cmd *cmd;
    struct hci_request req;
    u8 val, status;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    status = mgmt_le_support(hdev);
    if (status)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                               status);

    /* Enabling the experimental LL Privacy support disables support for
     * advertising.
     */
    if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                               MGMT_STATUS_NOT_SUPPORTED);

    if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                               MGMT_STATUS_INVALID_PARAMS);

    /* Refuse while advertising is paused (e.g. for suspend). */
    if (hdev->advertising_paused)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                               MGMT_STATUS_BUSY);

    hci_dev_lock(hdev);

    val = !!cp->val;

    /* The following conditions are ones which mean that we should
     * not do any HCI communication but directly send a mgmt
     * response to user space (after toggling the flag if
     * necessary).
     */
    if (!hdev_is_powered(hdev) ||
        (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
         (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
        hci_conn_num(hdev, LE_LINK) > 0 ||
        (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
         hdev->le_scan_type == LE_SCAN_ACTIVE)) {
        bool changed;

        if (cp->val) {
            hdev->cur_adv_instance = 0x00;
            changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
            if (cp->val == 0x02)
                hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
            else
                hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
        } else {
            changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
            hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
        }

        err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
        if (err < 0)
            goto unlock;

        /* Broadcast New Settings only when a flag actually changed. */
        if (changed)
            err = new_settings(hdev, sk);

        goto unlock;
    }

    /* Refuse while another advertising/LE toggle is in flight. */
    if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
        pending_find(MGMT_OP_SET_LE, hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
                              MGMT_STATUS_BUSY);
        goto unlock;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    hci_req_init(&req, hdev);

    if (cp->val == 0x02)
        hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
    else
        hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

    cancel_adv_timeout(hdev);

    if (val) {
        /* Switch to instance "0" for the Set Advertising setting.
         * We cannot use update_[adv|scan_rsp]_data() here as the
         * HCI_ADVERTISING flag is not yet set.
         */
        hdev->cur_adv_instance = 0x00;

        if (ext_adv_capable(hdev)) {
            __hci_req_start_ext_adv(&req, 0x00);
        } else {
            __hci_req_update_adv_data(&req, 0x00);
            __hci_req_update_scan_rsp_data(&req, 0x00);
            __hci_req_enable_advertising(&req);
        }
    } else {
        __hci_req_disable_advertising(&req);
    }

    err = hci_req_run(&req, set_advertising_complete);
    if (err < 0)
        mgmt_pending_remove(cmd);

unlock:
    hci_dev_unlock(hdev);
    return err;
}
5268
/* Set Static Address command handler.
 *
 * The static random address may only be changed while the controller
 * is powered off.  BDADDR_ANY disables use of a static address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
                              void *data, u16 len)
{
    struct mgmt_cp_set_static_address *cp = data;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                               MGMT_STATUS_NOT_SUPPORTED);

    /* The address cannot be changed while powered. */
    if (hdev_is_powered(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
                               MGMT_STATUS_REJECTED);

    if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
        /* BDADDR_NONE is never a valid static address. */
        if (!bacmp(&cp->bdaddr, BDADDR_NONE))
            return mgmt_cmd_status(sk, hdev->id,
                                   MGMT_OP_SET_STATIC_ADDRESS,
                                   MGMT_STATUS_INVALID_PARAMS);

        /* Two most significant bits shall be set */
        if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
            return mgmt_cmd_status(sk, hdev->id,
                                   MGMT_OP_SET_STATIC_ADDRESS,
                                   MGMT_STATUS_INVALID_PARAMS);
    }

    hci_dev_lock(hdev);

    bacpy(&hdev->static_addr, &cp->bdaddr);

    err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
    if (err < 0)
        goto unlock;

    err = new_settings(hdev, sk);

unlock:
    hci_dev_unlock(hdev);
    return err;
}
5312
/* Set Scan Parameters command handler.
 *
 * Stores the LE scan interval and window (valid range 0x0004-0x4000,
 * window must not exceed interval) and restarts a running background
 * scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
    struct mgmt_cp_set_scan_params *cp = data;
    __u16 interval, window;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                               MGMT_STATUS_NOT_SUPPORTED);

    interval = __le16_to_cpu(cp->interval);

    if (interval < 0x0004 || interval > 0x4000)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                               MGMT_STATUS_INVALID_PARAMS);

    window = __le16_to_cpu(cp->window);

    if (window < 0x0004 || window > 0x4000)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                               MGMT_STATUS_INVALID_PARAMS);

    /* The scan window may never exceed the scan interval. */
    if (window > interval)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
                               MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    hdev->le_scan_interval = interval;
    hdev->le_scan_window = window;

    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
                            NULL, 0);

    /* If background scan is running, restart it so new parameters are
     * loaded.
     */
    if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
        hdev->discovery.state == DISCOVERY_STOPPED) {
        struct hci_request req;

        hci_req_init(&req, hdev);

        hci_req_add_le_scan_disable(&req, false);
        hci_req_add_le_passive_scan(&req);

        hci_req_run(&req, NULL);
    }

    hci_dev_unlock(hdev);

    return err;
}
5369
/* HCI request callback for Set Fast Connectable: commits the flag and
 * notifies user space on success, or reports the failure status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
{
    struct mgmt_pending_cmd *cmd;

    bt_dev_dbg(hdev, "status 0x%02x", status);

    hci_dev_lock(hdev);

    cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
    if (!cmd)
        goto unlock;

    if (status) {
        mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                        mgmt_status(status));
    } else {
        struct mgmt_mode *cp = cmd->param;

        /* Only update the cached flag once the controller accepted
         * the change.
         */
        if (cp->val)
            hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
        else
            hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

        send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
        new_settings(hdev, cmd->sk);
    }

    mgmt_pending_remove(cmd);

unlock:
    hci_dev_unlock(hdev);
}
5403
/* Set Fast Connectable command handler.
 *
 * Toggles BR/EDR fast connectable mode.  When powered, the change is
 * written to the controller and completes asynchronously via
 * fast_connectable_complete(); when powered off only the cached flag
 * is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
                                void *data, u16 len)
{
    struct mgmt_mode *cp = data;
    struct mgmt_pending_cmd *cmd;
    struct hci_request req;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    /* Needs BR/EDR enabled and a controller of at least version 1.2. */
    if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
        hdev->hci_ver < BLUETOOTH_VER_1_2)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                               MGMT_STATUS_NOT_SUPPORTED);

    if (cp->val != 0x00 && cp->val != 0x01)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                               MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                              MGMT_STATUS_BUSY);
        goto unlock;
    }

    /* No-op when the setting already has the requested value. */
    if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
        err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
                                hdev);
        goto unlock;
    }

    /* While powered off, only toggle the cached flag. */
    if (!hdev_is_powered(hdev)) {
        hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
        err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
                                hdev);
        new_settings(hdev, sk);
        goto unlock;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
                           data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    hci_req_init(&req, hdev);

    __hci_req_write_fast_connectable(&req, cp->val);

    err = hci_req_run(&req, fast_connectable_complete);
    if (err < 0) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                              MGMT_STATUS_FAILED);
        mgmt_pending_remove(cmd);
    }

unlock:
    hci_dev_unlock(hdev);

    return err;
}
5468
/* HCI request callback for Set BR/EDR: reverts the HCI_BREDR_ENABLED
 * flag on failure, or notifies user space of the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
    struct mgmt_pending_cmd *cmd;

    bt_dev_dbg(hdev, "status 0x%02x", status);

    hci_dev_lock(hdev);

    cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
    if (!cmd)
        goto unlock;

    if (status) {
        u8 mgmt_err = mgmt_status(status);

        /* We need to restore the flag if related HCI commands
         * failed.
         */
        hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
    } else {
        send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
        new_settings(hdev, cmd->sk);
    }

    mgmt_pending_remove(cmd);

unlock:
    hci_dev_unlock(hdev);
}
5500
/* Set BR/EDR command handler.
 *
 * Enables or disables BR/EDR support on a dual-mode controller.
 * Disabling while powered is rejected, as is re-enabling when the
 * device operates LE-only with a static address or with secure
 * connections enabled.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
    struct mgmt_mode *cp = data;
    struct mgmt_pending_cmd *cmd;
    struct hci_request req;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    /* Only meaningful on dual-mode (BR/EDR + LE) controllers. */
    if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                               MGMT_STATUS_NOT_SUPPORTED);

    if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                               MGMT_STATUS_REJECTED);

    if (cp->val != 0x00 && cp->val != 0x01)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                               MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    /* No-op when the setting already has the requested value. */
    if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
        err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
        goto unlock;
    }

    if (!hdev_is_powered(hdev)) {
        /* Disabling BR/EDR also clears all the flags that only make
         * sense with BR/EDR enabled.
         */
        if (!cp->val) {
            hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
            hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
            hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
            hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
            hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
        }

        hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

        err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
        if (err < 0)
            goto unlock;

        err = new_settings(hdev, sk);
        goto unlock;
    }

    /* Reject disabling when powered on */
    if (!cp->val) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                              MGMT_STATUS_REJECTED);
        goto unlock;
    } else {
        /* When configuring a dual-mode controller to operate
         * with LE only and using a static address, then switching
         * BR/EDR back on is not allowed.
         *
         * Dual-mode controllers shall operate with the public
         * address as its identity address for BR/EDR and LE. So
         * reject the attempt to create an invalid configuration.
         *
         * The same restrictions applies when secure connections
         * has been enabled. For BR/EDR this is a controller feature
         * while for LE it is a host stack feature. This means that
         * switching BR/EDR back on when secure connections has been
         * enabled is not a supported transaction.
         */
        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
            (bacmp(&hdev->static_addr, BDADDR_ANY) ||
             hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
            err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                                  MGMT_STATUS_REJECTED);
            goto unlock;
        }
    }

    if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
                              MGMT_STATUS_BUSY);
        goto unlock;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    /* We need to flip the bit already here so that
     * hci_req_update_adv_data generates the correct flags.
     */
    hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

    hci_req_init(&req, hdev);

    __hci_req_write_fast_connectable(&req, false);
    __hci_req_update_scan(&req);

    /* Since only the advertising data flags will change, there
     * is no need to update the scan response data.
     */
    __hci_req_update_adv_data(&req, hdev->cur_adv_instance);

    err = hci_req_run(&req, set_bredr_complete);
    if (err < 0)
        mgmt_pending_remove(cmd);

unlock:
    hci_dev_unlock(hdev);
    return err;
}
5612
/* HCI request callback for Set Secure Connections: commits the
 * SC_ENABLED/SC_ONLY flag combination matching the requested mode and
 * notifies user space.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
    struct mgmt_pending_cmd *cmd;
    struct mgmt_mode *cp;

    bt_dev_dbg(hdev, "status %u", status);

    hci_dev_lock(hdev);

    cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
    if (!cmd)
        goto unlock;

    if (status) {
        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
                        mgmt_status(status));
        goto remove;
    }

    cp = cmd->param;

    /* 0x00 = disabled, 0x01 = SC enabled, 0x02 = SC-only mode. */
    switch (cp->val) {
    case 0x00:
        hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
        hci_dev_clear_flag(hdev, HCI_SC_ONLY);
        break;
    case 0x01:
        hci_dev_set_flag(hdev, HCI_SC_ENABLED);
        hci_dev_clear_flag(hdev, HCI_SC_ONLY);
        break;
    case 0x02:
        hci_dev_set_flag(hdev, HCI_SC_ENABLED);
        hci_dev_set_flag(hdev, HCI_SC_ONLY);
        break;
    }

    send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
    new_settings(hdev, cmd->sk);

remove:
    mgmt_pending_remove(cmd);
unlock:
    hci_dev_unlock(hdev);
}
5657
/* Set Secure Connections command handler.
 *
 * val 0x00 disables secure connections, 0x01 enables them and 0x02
 * enables SC-only mode.  When the controller cannot act on the change
 * directly (powered off, not SC capable, or BR/EDR disabled) only the
 * cached flags are toggled; otherwise the support bit is written to
 * the controller and completion happens in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
    struct mgmt_mode *cp = data;
    struct mgmt_pending_cmd *cmd;
    struct hci_request req;
    u8 val;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!lmp_sc_capable(hdev) &&
        !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                               MGMT_STATUS_NOT_SUPPORTED);

    /* With BR/EDR on an SC-capable controller, SSP must be enabled
     * before secure connections can be.
     */
    if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
        lmp_sc_capable(hdev) &&
        !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                               MGMT_STATUS_REJECTED);

    if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                               MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    /* Toggle only the cached flags when no HCI write is possible or
     * needed.
     */
    if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
        !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
        bool changed;

        if (cp->val) {
            changed = !hci_dev_test_and_set_flag(hdev,
                                                 HCI_SC_ENABLED);
            if (cp->val == 0x02)
                hci_dev_set_flag(hdev, HCI_SC_ONLY);
            else
                hci_dev_clear_flag(hdev, HCI_SC_ONLY);
        } else {
            changed = hci_dev_test_and_clear_flag(hdev,
                                                  HCI_SC_ENABLED);
            hci_dev_clear_flag(hdev, HCI_SC_ONLY);
        }

        err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
        if (err < 0)
            goto failed;

        if (changed)
            err = new_settings(hdev, sk);

        goto failed;
    }

    if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
                              MGMT_STATUS_BUSY);
        goto failed;
    }

    val = !!cp->val;

    /* No-op when both flags already have the requested values. */
    if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
        (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
        err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
        goto failed;
    }

    cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
    if (!cmd) {
        err = -ENOMEM;
        goto failed;
    }

    hci_req_init(&req, hdev);
    hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
    err = hci_req_run(&req, sc_enable_complete);
    if (err < 0) {
        mgmt_pending_remove(cmd);
        goto failed;
    }

failed:
    hci_dev_unlock(hdev);
    return err;
}
5745
5746 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5747 void *data, u16 len)
5748 {
5749 struct mgmt_mode *cp = data;
5750 bool changed, use_changed;
5751 int err;
5752
5753 bt_dev_dbg(hdev, "sock %p", sk);
5754
5755 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5756 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5757 MGMT_STATUS_INVALID_PARAMS);
5758
5759 hci_dev_lock(hdev);
5760
5761 if (cp->val)
5762 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5763 else
5764 changed = hci_dev_test_and_clear_flag(hdev,
5765 HCI_KEEP_DEBUG_KEYS);
5766
5767 if (cp->val == 0x02)
5768 use_changed = !hci_dev_test_and_set_flag(hdev,
5769 HCI_USE_DEBUG_KEYS);
5770 else
5771 use_changed = hci_dev_test_and_clear_flag(hdev,
5772 HCI_USE_DEBUG_KEYS);
5773
5774 if (hdev_is_powered(hdev) && use_changed &&
5775 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5776 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5777 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5778 sizeof(mode), &mode);
5779 }
5780
5781 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5782 if (err < 0)
5783 goto unlock;
5784
5785 if (changed)
5786 err = new_settings(hdev, sk);
5787
5788 unlock:
5789 hci_dev_unlock(hdev);
5790 return err;
5791 }
5792
/* Handle the MGMT Set Privacy command.
 *
 * cp->privacy semantics: 0x00 = off, 0x01 = privacy (RPA) enabled,
 * 0x02 = limited privacy. The supplied IRK becomes the local identity
 * resolving key when privacy is enabled.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Privacy only makes sense on an LE capable controller */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The privacy setting and local IRK may only change while the
	 * controller is powered off.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Store the new local IRK and force a fresh RPA for the
		 * controller and all advertising instances.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Privacy disabled: wipe the stored IRK */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5849
5850 static bool irk_is_valid(struct mgmt_irk_info *irk)
5851 {
5852 switch (irk->addr.type) {
5853 case BDADDR_LE_PUBLIC:
5854 return true;
5855
5856 case BDADDR_LE_RANDOM:
5857 /* Two most significant bits shall be set */
5858 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5859 return false;
5860 return true;
5861 }
5862
5863 return false;
5864 }
5865
5866 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5867 u16 len)
5868 {
5869 struct mgmt_cp_load_irks *cp = cp_data;
5870 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5871 sizeof(struct mgmt_irk_info));
5872 u16 irk_count, expected_len;
5873 int i, err;
5874
5875 bt_dev_dbg(hdev, "sock %p", sk);
5876
5877 if (!lmp_le_capable(hdev))
5878 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5879 MGMT_STATUS_NOT_SUPPORTED);
5880
5881 irk_count = __le16_to_cpu(cp->irk_count);
5882 if (irk_count > max_irk_count) {
5883 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5884 irk_count);
5885 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5886 MGMT_STATUS_INVALID_PARAMS);
5887 }
5888
5889 expected_len = struct_size(cp, irks, irk_count);
5890 if (expected_len != len) {
5891 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5892 expected_len, len);
5893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5894 MGMT_STATUS_INVALID_PARAMS);
5895 }
5896
5897 bt_dev_dbg(hdev, "irk_count %u", irk_count);
5898
5899 for (i = 0; i < irk_count; i++) {
5900 struct mgmt_irk_info *key = &cp->irks[i];
5901
5902 if (!irk_is_valid(key))
5903 return mgmt_cmd_status(sk, hdev->id,
5904 MGMT_OP_LOAD_IRKS,
5905 MGMT_STATUS_INVALID_PARAMS);
5906 }
5907
5908 hci_dev_lock(hdev);
5909
5910 hci_smp_irks_clear(hdev);
5911
5912 for (i = 0; i < irk_count; i++) {
5913 struct mgmt_irk_info *irk = &cp->irks[i];
5914
5915 if (hci_is_blocked_key(hdev,
5916 HCI_BLOCKED_KEY_TYPE_IRK,
5917 irk->val)) {
5918 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5919 &irk->addr.bdaddr);
5920 continue;
5921 }
5922
5923 hci_add_irk(hdev, &irk->addr.bdaddr,
5924 le_addr_type(irk->addr.type), irk->val,
5925 BDADDR_ANY);
5926 }
5927
5928 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5929
5930 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5931
5932 hci_dev_unlock(hdev);
5933
5934 return err;
5935 }
5936
5937 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5938 {
5939 if (key->master != 0x00 && key->master != 0x01)
5940 return false;
5941
5942 switch (key->addr.type) {
5943 case BDADDR_LE_PUBLIC:
5944 return true;
5945
5946 case BDADDR_LE_RANDOM:
5947 /* Two most significant bits shall be set */
5948 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5949 return false;
5950 return true;
5951 }
5952
5953 return false;
5954 }
5955
/* Handle the MGMT Load Long Term Keys command: replace the kernel's LTK
 * store with the list supplied by user space. The full list is validated
 * before the existing keys are cleared, so the operation is
 * all-or-nothing at the command level (individual blocked or debug keys
 * may still be skipped).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Reject the whole request if any single entry is malformed */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to the internal SMP key type.
		 * NOTE: MGMT_LTK_P256_DEBUG deliberately falls through to
		 * the default "continue", so debug keys (and any unknown
		 * type) are never added to the store.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6051
6052 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6053 {
6054 struct hci_conn *conn = cmd->user_data;
6055 struct mgmt_rp_get_conn_info rp;
6056 int err;
6057
6058 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6059
6060 if (status == MGMT_STATUS_SUCCESS) {
6061 rp.rssi = conn->rssi;
6062 rp.tx_power = conn->tx_power;
6063 rp.max_tx_power = conn->max_tx_power;
6064 } else {
6065 rp.rssi = HCI_RSSI_INVALID;
6066 rp.tx_power = HCI_TX_POWER_INVALID;
6067 rp.max_tx_power = HCI_TX_POWER_INVALID;
6068 }
6069
6070 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6071 status, &rp, sizeof(rp));
6072
6073 hci_conn_drop(conn);
6074 hci_conn_put(conn);
6075
6076 return err;
6077 }
6078
/* HCI request callback for the RSSI/TX-power refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		/* Last command was Read TX Power: its failure is ignored */
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Still NULL: neither command matches the sent data, so there is
	 * no handle to recover.
	 */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was queued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6132
/* Handle the MGMT Get Connection Information command.
 *
 * Replies immediately from the per-connection cache when it is still
 * fresh; otherwise queues Read RSSI / Read TX Power HCI commands and
 * defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the link type implied by the address */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always first; the completion handler relies
		 * on this ordering to pick which failure matters.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped again in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6253
6254 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6255 {
6256 struct hci_conn *conn = cmd->user_data;
6257 struct mgmt_rp_get_clock_info rp;
6258 struct hci_dev *hdev;
6259 int err;
6260
6261 memset(&rp, 0, sizeof(rp));
6262 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6263
6264 if (status)
6265 goto complete;
6266
6267 hdev = hci_dev_get(cmd->index);
6268 if (hdev) {
6269 rp.local_clock = cpu_to_le32(hdev->clock);
6270 hci_dev_put(hdev);
6271 }
6272
6273 if (conn) {
6274 rp.piconet_clock = cpu_to_le32(conn->clock);
6275 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6276 }
6277
6278 complete:
6279 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6280 sizeof(rp));
6281
6282 if (conn) {
6283 hci_conn_drop(conn);
6284 hci_conn_put(conn);
6285 }
6286
6287 return err;
6288 }
6289
6290 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6291 {
6292 struct hci_cp_read_clock *hci_cp;
6293 struct mgmt_pending_cmd *cmd;
6294 struct hci_conn *conn;
6295
6296 bt_dev_dbg(hdev, "status %u", status);
6297
6298 hci_dev_lock(hdev);
6299
6300 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
6301 if (!hci_cp)
6302 goto unlock;
6303
6304 if (hci_cp->which) {
6305 u16 handle = __le16_to_cpu(hci_cp->handle);
6306 conn = hci_conn_hash_lookup_handle(hdev, handle);
6307 } else {
6308 conn = NULL;
6309 }
6310
6311 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6312 if (!cmd)
6313 goto unlock;
6314
6315 cmd->cmd_complete(cmd, mgmt_status(status));
6316 mgmt_pending_remove(cmd);
6317
6318 unlock:
6319 hci_dev_unlock(hdev);
6320 }
6321
/* Handle the MGMT Get Clock Information command (BR/EDR only).
 *
 * Always reads the local clock; when a peer address is supplied, also
 * reads the piconet clock of that connection. The reply is sent from
 * clock_info_cmd_complete() once the HCI request finishes.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is wanted */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (handle 0, which = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped again in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6397
6398 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6399 {
6400 struct hci_conn *conn;
6401
6402 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6403 if (!conn)
6404 return false;
6405
6406 if (conn->dst_type != type)
6407 return false;
6408
6409 if (conn->state != BT_CONNECTED)
6410 return false;
6411
6412 return true;
6413 }
6414
6415 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for @addr/@addr_type and move
 * them onto the action list matching @auto_connect.
 *
 * Returns 0 on success or -EIO if the parameter entry could not be
 * allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Returns the existing entry if one is already present */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6459
6460 static void device_added(struct sock *sk, struct hci_dev *hdev,
6461 bdaddr_t *bdaddr, u8 type, u8 action)
6462 {
6463 struct mgmt_ev_device_added ev;
6464
6465 bacpy(&ev.addr.bdaddr, bdaddr);
6466 ev.addr.type = type;
6467 ev.action = action;
6468
6469 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6470 }
6471
/* Handle the MGMT Add Device command.
 *
 * cp->action semantics: 0x00 = background scan and report, 0x01 = allow
 * incoming connection (BR/EDR) / direct connect (LE), 0x02 = LE
 * auto-connect. BR/EDR devices go on the whitelist; LE devices get
 * connection parameters with the matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need to be enabled for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Report the entry's flags back in the Device Flags event */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6569
6570 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6571 bdaddr_t *bdaddr, u8 type)
6572 {
6573 struct mgmt_ev_device_removed ev;
6574
6575 bacpy(&ev.addr.bdaddr, bdaddr);
6576 ev.addr.type = type;
6577
6578 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6579 }
6580
/* Handle the MGMT Remove Device command.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or deletes its connection parameters (LE). With BDADDR_ANY,
 * clears the whole whitelist and all user-added LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the whitelist: treat as bad params */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not added via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 (BR/EDR) */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries were not added via Add Device */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in progress */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6709
/* Handle the MGMT Load Connection Parameters command.
 *
 * Clears disabled parameter entries and loads the supplied preferred
 * LE connection parameters. Individual entries with an invalid address
 * type or out-of-range values are logged and skipped rather than
 * failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the limits in the Core Specification */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6794
/* Handle the MGMT Set External Configuration command.
 *
 * Toggles whether the controller is considered externally configured.
 * If the change flips the configured/unconfigured state of the device,
 * the mgmt index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The setting may only change while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers that declare external config */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state flipped, move the device between the
	 * configured and unconfigured mgmt index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: bring it up via power_on work */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6850
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store the public address to be
 * programmed into the controller at power-on. If this completes the
 * controller's configuration, transition it from the unconfigured to the
 * configured index list and schedule power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The public address can only be set while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zeroes) is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a hook for programming the address. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only emit follow-up events if the stored address actually changed */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If setting the address completed the configuration, move the
	 * controller to the configured index list and start its power-on
	 * sequence.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6902
/* Completion handler for the HCI local OOB data request issued by
 * read_local_ssp_oob_req(). Translates the controller's reply into a
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA response carrying the hash/randomizer
 * values as EIR fields, and forwards the data to sockets that enabled
 * OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller reported an error: reply with an empty EIR. */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only the P-192 hash and randomizer. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes for the class-of-device field plus two
			 * 18-byte (2-byte EIR header + 16-byte value) fields.
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 values, plus P-192 unless the
		 * controller is in Secure Connections Only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure only the header (type + zero eir_len) is sent. */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Distribute the updated OOB data to sockets that enabled
	 * HCI_MGMT_OOB_DATA_EVENTS.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7013
7014 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7015 struct mgmt_cp_read_local_oob_ext_data *cp)
7016 {
7017 struct mgmt_pending_cmd *cmd;
7018 struct hci_request req;
7019 int err;
7020
7021 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7022 cp, sizeof(*cp));
7023 if (!cmd)
7024 return -ENOMEM;
7025
7026 hci_req_init(&req, hdev);
7027
7028 if (bredr_sc_enabled(hdev))
7029 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7030 else
7031 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7032
7033 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7034 if (err < 0) {
7035 mgmt_pending_remove(cmd);
7036 return err;
7037 }
7038
7039 return 0;
7040 }
7041
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA. Depending on the requested
 * address type this either kicks off an HCI request for BR/EDR OOB data
 * (answered asynchronously from read_local_oob_ext_data_complete()) or
 * builds the LE OOB EIR data inline and responds immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		/* Pre-compute the worst-case EIR length for the reply
		 * allocation below.
		 */
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Reset and rebuild the actual EIR length while appending fields. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB data must come from the controller; the reply
			 * is then sent by the request's completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Use the static random address (type byte 0x01) when it
		 * is forced, when no public address exists, or in LE-only
		 * mode with a static address configured; otherwise use the
		 * public address (type byte 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE Role field: 0x02 while advertising, 0x01 otherwise. */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Distribute the new OOB data to sockets that enabled OOB events. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7197
7198 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7199 {
7200 u32 flags = 0;
7201
7202 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7203 flags |= MGMT_ADV_FLAG_DISCOV;
7204 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7205 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7206 flags |= MGMT_ADV_FLAG_APPEARANCE;
7207 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7208 flags |= MGMT_ADV_PARAM_DURATION;
7209 flags |= MGMT_ADV_PARAM_TIMEOUT;
7210 flags |= MGMT_ADV_PARAM_INTERVALS;
7211 flags |= MGMT_ADV_PARAM_TX_POWER;
7212
7213 /* In extended adv TX_POWER returned from Set Adv Param
7214 * will be always valid.
7215 */
7216 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7217 ext_adv_capable(hdev))
7218 flags |= MGMT_ADV_FLAG_TX_POWER;
7219
7220 if (ext_adv_capable(hdev)) {
7221 flags |= MGMT_ADV_FLAG_SEC_1M;
7222 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7223 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7224
7225 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7226 flags |= MGMT_ADV_FLAG_SEC_2M;
7227
7228 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7229 flags |= MGMT_ADV_FLAG_SEC_CODED;
7230 }
7231
7232 return flags;
7233 }
7234
7235 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7236 void *data, u16 data_len)
7237 {
7238 struct mgmt_rp_read_adv_features *rp;
7239 size_t rp_len;
7240 int err;
7241 struct adv_info *adv_instance;
7242 u32 supported_flags;
7243 u8 *instance;
7244
7245 bt_dev_dbg(hdev, "sock %p", sk);
7246
7247 if (!lmp_le_capable(hdev))
7248 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7249 MGMT_STATUS_REJECTED);
7250
7251 /* Enabling the experimental LL Privay support disables support for
7252 * advertising.
7253 */
7254 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7255 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7256 MGMT_STATUS_NOT_SUPPORTED);
7257
7258 hci_dev_lock(hdev);
7259
7260 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7261 rp = kmalloc(rp_len, GFP_ATOMIC);
7262 if (!rp) {
7263 hci_dev_unlock(hdev);
7264 return -ENOMEM;
7265 }
7266
7267 supported_flags = get_supported_adv_flags(hdev);
7268
7269 rp->supported_flags = cpu_to_le32(supported_flags);
7270 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7271 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7272 rp->max_instances = hdev->le_num_of_adv_sets;
7273 rp->num_instances = hdev->adv_instance_cnt;
7274
7275 instance = rp->instance;
7276 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7277 *instance = adv_instance->instance;
7278 instance++;
7279 }
7280
7281 hci_dev_unlock(hdev);
7282
7283 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7284 MGMT_STATUS_SUCCESS, rp, rp_len);
7285
7286 kfree(rp);
7287
7288 return err;
7289 }
7290
7291 static u8 calculate_name_len(struct hci_dev *hdev)
7292 {
7293 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7294
7295 return append_local_name(hdev, buf, 0);
7296 }
7297
7298 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7299 bool is_adv_data)
7300 {
7301 u8 max_len = HCI_MAX_AD_LENGTH;
7302
7303 if (is_adv_data) {
7304 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7305 MGMT_ADV_FLAG_LIMITED_DISCOV |
7306 MGMT_ADV_FLAG_MANAGED_FLAGS))
7307 max_len -= 3;
7308
7309 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7310 max_len -= 3;
7311 } else {
7312 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7313 max_len -= calculate_name_len(hdev);
7314
7315 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7316 max_len -= 4;
7317 }
7318
7319 return max_len;
7320 }
7321
7322 static bool flags_managed(u32 adv_flags)
7323 {
7324 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7325 MGMT_ADV_FLAG_LIMITED_DISCOV |
7326 MGMT_ADV_FLAG_MANAGED_FLAGS);
7327 }
7328
7329 static bool tx_power_managed(u32 adv_flags)
7330 {
7331 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7332 }
7333
7334 static bool name_managed(u32 adv_flags)
7335 {
7336 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7337 }
7338
7339 static bool appearance_managed(u32 adv_flags)
7340 {
7341 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7342 }
7343
7344 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7345 u8 len, bool is_adv_data)
7346 {
7347 int i, cur_len;
7348 u8 max_len;
7349
7350 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7351
7352 if (len > max_len)
7353 return false;
7354
7355 /* Make sure that the data is correctly formatted. */
7356 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7357 cur_len = data[i];
7358
7359 if (data[i + 1] == EIR_FLAGS &&
7360 (!is_adv_data || flags_managed(adv_flags)))
7361 return false;
7362
7363 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7364 return false;
7365
7366 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7367 return false;
7368
7369 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7370 return false;
7371
7372 if (data[i + 1] == EIR_APPEARANCE &&
7373 appearance_managed(adv_flags))
7374 return false;
7375
7376 /* If the current field length would exceed the total data
7377 * length, then it's invalid.
7378 */
7379 if (i + cur_len >= len)
7380 return false;
7381 }
7382
7383 return true;
7384 }
7385
7386 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7387 {
7388 u32 supported_flags, phy_flags;
7389
7390 /* The current implementation only supports a subset of the specified
7391 * flags. Also need to check mutual exclusiveness of sec flags.
7392 */
7393 supported_flags = get_supported_adv_flags(hdev);
7394 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7395 if (adv_flags & ~supported_flags ||
7396 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7397 return false;
7398
7399 return true;
7400 }
7401
7402 static bool adv_busy(struct hci_dev *hdev)
7403 {
7404 return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7405 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7406 pending_find(MGMT_OP_SET_LE, hdev) ||
7407 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7408 pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7409 }
7410
/* Completion callback for the HCI request scheduled by add_advertising()
 * or add_ext_adv_data(). On failure every still-pending instance is torn
 * down; on success they are marked committed. Finally the pending mgmt
 * command (if any) is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* The request may have originated from either the legacy or the
	 * extended advertising-data command.
	 */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Instance was programmed successfully; keep it. */
			adv_instance->pending = false;
			continue;
		}

		/* Failure: drop the instance, stopping its timer first if
		 * it is the one currently being advertised.
		 */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7464
7465 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7466 void *data, u16 data_len)
7467 {
7468 struct mgmt_cp_add_advertising *cp = data;
7469 struct mgmt_rp_add_advertising rp;
7470 u32 flags;
7471 u8 status;
7472 u16 timeout, duration;
7473 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7474 u8 schedule_instance = 0;
7475 struct adv_info *next_instance;
7476 int err;
7477 struct mgmt_pending_cmd *cmd;
7478 struct hci_request req;
7479
7480 bt_dev_dbg(hdev, "sock %p", sk);
7481
7482 status = mgmt_le_support(hdev);
7483 if (status)
7484 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7485 status);
7486
7487 /* Enabling the experimental LL Privay support disables support for
7488 * advertising.
7489 */
7490 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7491 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7492 MGMT_STATUS_NOT_SUPPORTED);
7493
7494 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7495 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7496 MGMT_STATUS_INVALID_PARAMS);
7497
7498 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7499 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7500 MGMT_STATUS_INVALID_PARAMS);
7501
7502 flags = __le32_to_cpu(cp->flags);
7503 timeout = __le16_to_cpu(cp->timeout);
7504 duration = __le16_to_cpu(cp->duration);
7505
7506 if (!requested_adv_flags_are_valid(hdev, flags))
7507 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7508 MGMT_STATUS_INVALID_PARAMS);
7509
7510 hci_dev_lock(hdev);
7511
7512 if (timeout && !hdev_is_powered(hdev)) {
7513 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7514 MGMT_STATUS_REJECTED);
7515 goto unlock;
7516 }
7517
7518 if (adv_busy(hdev)) {
7519 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7520 MGMT_STATUS_BUSY);
7521 goto unlock;
7522 }
7523
7524 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7525 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7526 cp->scan_rsp_len, false)) {
7527 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7528 MGMT_STATUS_INVALID_PARAMS);
7529 goto unlock;
7530 }
7531
7532 err = hci_add_adv_instance(hdev, cp->instance, flags,
7533 cp->adv_data_len, cp->data,
7534 cp->scan_rsp_len,
7535 cp->data + cp->adv_data_len,
7536 timeout, duration,
7537 HCI_ADV_TX_POWER_NO_PREFERENCE,
7538 hdev->le_adv_min_interval,
7539 hdev->le_adv_max_interval);
7540 if (err < 0) {
7541 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7542 MGMT_STATUS_FAILED);
7543 goto unlock;
7544 }
7545
7546 /* Only trigger an advertising added event if a new instance was
7547 * actually added.
7548 */
7549 if (hdev->adv_instance_cnt > prev_instance_cnt)
7550 mgmt_advertising_added(sk, hdev, cp->instance);
7551
7552 if (hdev->cur_adv_instance == cp->instance) {
7553 /* If the currently advertised instance is being changed then
7554 * cancel the current advertising and schedule the next
7555 * instance. If there is only one instance then the overridden
7556 * advertising data will be visible right away.
7557 */
7558 cancel_adv_timeout(hdev);
7559
7560 next_instance = hci_get_next_instance(hdev, cp->instance);
7561 if (next_instance)
7562 schedule_instance = next_instance->instance;
7563 } else if (!hdev->adv_instance_timeout) {
7564 /* Immediately advertise the new instance if no other
7565 * instance is currently being advertised.
7566 */
7567 schedule_instance = cp->instance;
7568 }
7569
7570 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7571 * there is no instance to be advertised then we have no HCI
7572 * communication to make. Simply return.
7573 */
7574 if (!hdev_is_powered(hdev) ||
7575 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7576 !schedule_instance) {
7577 rp.instance = cp->instance;
7578 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7579 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7580 goto unlock;
7581 }
7582
7583 /* We're good to go, update advertising data, parameters, and start
7584 * advertising.
7585 */
7586 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7587 data_len);
7588 if (!cmd) {
7589 err = -ENOMEM;
7590 goto unlock;
7591 }
7592
7593 hci_req_init(&req, hdev);
7594
7595 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7596
7597 if (!err)
7598 err = hci_req_run(&req, add_advertising_complete);
7599
7600 if (err < 0) {
7601 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7602 MGMT_STATUS_FAILED);
7603 mgmt_pending_remove(cmd);
7604 }
7605
7606 unlock:
7607 hci_dev_unlock(hdev);
7608
7609 return err;
7610 }
7611
/* Completion callback for the extended advertising parameter request
 * issued by add_ext_adv_params(). Answers the pending mgmt command with
 * the selected TX power and the remaining data capacity, or tears the
 * instance down again on failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	/* The pending command is removed even when no instance was found. */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
7668
7669 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7670 void *data, u16 data_len)
7671 {
7672 struct mgmt_cp_add_ext_adv_params *cp = data;
7673 struct mgmt_rp_add_ext_adv_params rp;
7674 struct mgmt_pending_cmd *cmd = NULL;
7675 struct adv_info *adv_instance;
7676 struct hci_request req;
7677 u32 flags, min_interval, max_interval;
7678 u16 timeout, duration;
7679 u8 status;
7680 s8 tx_power;
7681 int err;
7682
7683 BT_DBG("%s", hdev->name);
7684
7685 status = mgmt_le_support(hdev);
7686 if (status)
7687 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7688 status);
7689
7690 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7691 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7692 MGMT_STATUS_INVALID_PARAMS);
7693
7694 /* The purpose of breaking add_advertising into two separate MGMT calls
7695 * for params and data is to allow more parameters to be added to this
7696 * structure in the future. For this reason, we verify that we have the
7697 * bare minimum structure we know of when the interface was defined. Any
7698 * extra parameters we don't know about will be ignored in this request.
7699 */
7700 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7702 MGMT_STATUS_INVALID_PARAMS);
7703
7704 flags = __le32_to_cpu(cp->flags);
7705
7706 if (!requested_adv_flags_are_valid(hdev, flags))
7707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7708 MGMT_STATUS_INVALID_PARAMS);
7709
7710 hci_dev_lock(hdev);
7711
7712 /* In new interface, we require that we are powered to register */
7713 if (!hdev_is_powered(hdev)) {
7714 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7715 MGMT_STATUS_REJECTED);
7716 goto unlock;
7717 }
7718
7719 if (adv_busy(hdev)) {
7720 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7721 MGMT_STATUS_BUSY);
7722 goto unlock;
7723 }
7724
7725 /* Parse defined parameters from request, use defaults otherwise */
7726 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7727 __le16_to_cpu(cp->timeout) : 0;
7728
7729 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7730 __le16_to_cpu(cp->duration) :
7731 hdev->def_multi_adv_rotation_duration;
7732
7733 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7734 __le32_to_cpu(cp->min_interval) :
7735 hdev->le_adv_min_interval;
7736
7737 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7738 __le32_to_cpu(cp->max_interval) :
7739 hdev->le_adv_max_interval;
7740
7741 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7742 cp->tx_power :
7743 HCI_ADV_TX_POWER_NO_PREFERENCE;
7744
7745 /* Create advertising instance with no advertising or response data */
7746 err = hci_add_adv_instance(hdev, cp->instance, flags,
7747 0, NULL, 0, NULL, timeout, duration,
7748 tx_power, min_interval, max_interval);
7749
7750 if (err < 0) {
7751 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7752 MGMT_STATUS_FAILED);
7753 goto unlock;
7754 }
7755
7756 hdev->cur_adv_instance = cp->instance;
7757 /* Submit request for advertising params if ext adv available */
7758 if (ext_adv_capable(hdev)) {
7759 hci_req_init(&req, hdev);
7760 adv_instance = hci_find_adv_instance(hdev, cp->instance);
7761
7762 /* Updating parameters of an active instance will return a
7763 * Command Disallowed error, so we must first disable the
7764 * instance if it is active.
7765 */
7766 if (!adv_instance->pending)
7767 __hci_req_disable_ext_adv_instance(&req, cp->instance);
7768
7769 __hci_req_setup_ext_adv_instance(&req, cp->instance);
7770
7771 err = hci_req_run(&req, add_ext_adv_params_complete);
7772
7773 if (!err)
7774 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
7775 hdev, data, data_len);
7776 if (!cmd) {
7777 err = -ENOMEM;
7778 hci_remove_adv_instance(hdev, cp->instance);
7779 goto unlock;
7780 }
7781
7782 } else {
7783 rp.instance = cp->instance;
7784 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
7785 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7786 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7787 err = mgmt_cmd_complete(sk, hdev->id,
7788 MGMT_OP_ADD_EXT_ADV_PARAMS,
7789 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7790 }
7791
7792 unlock:
7793 hci_dev_unlock(hdev);
7794
7795 return err;
7796 }
7797
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan response
 * data to an instance previously created via MGMT_OP_ADD_EXT_ADV_PARAMS
 * and start (or schedule) advertising it. On error the instance is
 * removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been registered by add_ext_adv_params(). */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7943
7944 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7945 u16 opcode)
7946 {
7947 struct mgmt_pending_cmd *cmd;
7948 struct mgmt_cp_remove_advertising *cp;
7949 struct mgmt_rp_remove_advertising rp;
7950
7951 bt_dev_dbg(hdev, "status %d", status);
7952
7953 hci_dev_lock(hdev);
7954
7955 /* A failure status here only means that we failed to disable
7956 * advertising. Otherwise, the advertising instance has been removed,
7957 * so report success.
7958 */
7959 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7960 if (!cmd)
7961 goto unlock;
7962
7963 cp = cmd->param;
7964 rp.instance = cp->instance;
7965
7966 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7967 &rp, sizeof(rp));
7968 mgmt_pending_remove(cmd);
7969
7970 unlock:
7971 hci_dev_unlock(hdev);
7972 }
7973
7974 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7975 void *data, u16 data_len)
7976 {
7977 struct mgmt_cp_remove_advertising *cp = data;
7978 struct mgmt_rp_remove_advertising rp;
7979 struct mgmt_pending_cmd *cmd;
7980 struct hci_request req;
7981 int err;
7982
7983 bt_dev_dbg(hdev, "sock %p", sk);
7984
7985 /* Enabling the experimental LL Privay support disables support for
7986 * advertising.
7987 */
7988 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7990 MGMT_STATUS_NOT_SUPPORTED);
7991
7992 hci_dev_lock(hdev);
7993
7994 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7995 err = mgmt_cmd_status(sk, hdev->id,
7996 MGMT_OP_REMOVE_ADVERTISING,
7997 MGMT_STATUS_INVALID_PARAMS);
7998 goto unlock;
7999 }
8000
8001 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8002 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8003 pending_find(MGMT_OP_SET_LE, hdev)) {
8004 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8005 MGMT_STATUS_BUSY);
8006 goto unlock;
8007 }
8008
8009 if (list_empty(&hdev->adv_instances)) {
8010 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8011 MGMT_STATUS_INVALID_PARAMS);
8012 goto unlock;
8013 }
8014
8015 hci_req_init(&req, hdev);
8016
8017 /* If we use extended advertising, instance is disabled and removed */
8018 if (ext_adv_capable(hdev)) {
8019 __hci_req_disable_ext_adv_instance(&req, cp->instance);
8020 __hci_req_remove_ext_adv_instance(&req, cp->instance);
8021 }
8022
8023 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8024
8025 if (list_empty(&hdev->adv_instances))
8026 __hci_req_disable_advertising(&req);
8027
8028 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8029 * flag is set or the device isn't powered then we have no HCI
8030 * communication to make. Simply return.
8031 */
8032 if (skb_queue_empty(&req.cmd_q) ||
8033 !hdev_is_powered(hdev) ||
8034 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8035 hci_req_purge(&req);
8036 rp.instance = cp->instance;
8037 err = mgmt_cmd_complete(sk, hdev->id,
8038 MGMT_OP_REMOVE_ADVERTISING,
8039 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8040 goto unlock;
8041 }
8042
8043 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8044 data_len);
8045 if (!cmd) {
8046 err = -ENOMEM;
8047 goto unlock;
8048 }
8049
8050 err = hci_req_run(&req, remove_advertising_complete);
8051 if (err < 0)
8052 mgmt_pending_remove(cmd);
8053
8054 unlock:
8055 hci_dev_unlock(hdev);
8056
8057 return err;
8058 }
8059
8060 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8061 void *data, u16 data_len)
8062 {
8063 struct mgmt_cp_get_adv_size_info *cp = data;
8064 struct mgmt_rp_get_adv_size_info rp;
8065 u32 flags, supported_flags;
8066 int err;
8067
8068 bt_dev_dbg(hdev, "sock %p", sk);
8069
8070 if (!lmp_le_capable(hdev))
8071 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8072 MGMT_STATUS_REJECTED);
8073
8074 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8076 MGMT_STATUS_INVALID_PARAMS);
8077
8078 flags = __le32_to_cpu(cp->flags);
8079
8080 /* The current implementation only supports a subset of the specified
8081 * flags.
8082 */
8083 supported_flags = get_supported_adv_flags(hdev);
8084 if (flags & ~supported_flags)
8085 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8086 MGMT_STATUS_INVALID_PARAMS);
8087
8088 rp.instance = cp->instance;
8089 rp.flags = cp->flags;
8090 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8091 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8092
8093 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8094 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8095
8096 return err;
8097 }
8098
/* Management command dispatch table, indexed by mgmt opcode. Each entry
 * gives the handler, the expected parameter size (the minimum size when
 * HCI_MGMT_VAR_LEN is set) and flags controlling whether an hdev index is
 * required/optional and whether untrusted sockets may issue the command.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
};
8223
8224 void mgmt_index_added(struct hci_dev *hdev)
8225 {
8226 struct mgmt_ev_ext_index ev;
8227
8228 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8229 return;
8230
8231 switch (hdev->dev_type) {
8232 case HCI_PRIMARY:
8233 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8234 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8235 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8236 ev.type = 0x01;
8237 } else {
8238 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8239 HCI_MGMT_INDEX_EVENTS);
8240 ev.type = 0x00;
8241 }
8242 break;
8243 case HCI_AMP:
8244 ev.type = 0x02;
8245 break;
8246 default:
8247 return;
8248 }
8249
8250 ev.bus = hdev->bus;
8251
8252 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8253 HCI_MGMT_EXT_INDEX_EVENTS);
8254 }
8255
8256 void mgmt_index_removed(struct hci_dev *hdev)
8257 {
8258 struct mgmt_ev_ext_index ev;
8259 u8 status = MGMT_STATUS_INVALID_INDEX;
8260
8261 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8262 return;
8263
8264 switch (hdev->dev_type) {
8265 case HCI_PRIMARY:
8266 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8267
8268 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8269 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8270 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8271 ev.type = 0x01;
8272 } else {
8273 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8274 HCI_MGMT_INDEX_EVENTS);
8275 ev.type = 0x00;
8276 }
8277 break;
8278 case HCI_AMP:
8279 ev.type = 0x02;
8280 break;
8281 default:
8282 return;
8283 }
8284
8285 ev.bus = hdev->bus;
8286
8287 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8288 HCI_MGMT_EXT_INDEX_EVENTS);
8289 }
8290
8291 /* This function requires the caller holds hdev->lock */
8292 static void restart_le_actions(struct hci_dev *hdev)
8293 {
8294 struct hci_conn_params *p;
8295
8296 list_for_each_entry(p, &hdev->le_conn_params, list) {
8297 /* Needed for AUTO_OFF case where might not "really"
8298 * have been powered off.
8299 */
8300 list_del_init(&p->action);
8301
8302 switch (p->auto_connect) {
8303 case HCI_AUTO_CONN_DIRECT:
8304 case HCI_AUTO_CONN_ALWAYS:
8305 list_add(&p->action, &hdev->pend_le_conns);
8306 break;
8307 case HCI_AUTO_CONN_REPORT:
8308 list_add(&p->action, &hdev->pend_le_reports);
8309 break;
8310 default:
8311 break;
8312 }
8313 }
8314 }
8315
8316 void mgmt_power_on(struct hci_dev *hdev, int err)
8317 {
8318 struct cmd_lookup match = { NULL, hdev };
8319
8320 bt_dev_dbg(hdev, "err %d", err);
8321
8322 hci_dev_lock(hdev);
8323
8324 if (!err) {
8325 restart_le_actions(hdev);
8326 hci_update_background_scan(hdev);
8327 }
8328
8329 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8330
8331 new_settings(hdev, match.sk);
8332
8333 if (match.sk)
8334 sock_put(match.sk);
8335
8336 hci_dev_unlock(hdev);
8337 }
8338
/* Complete all pending mgmt commands and notify userspace after the
 * controller has been powered off. NOTE(review): the double-underscore
 * prefix suggests the caller handles locking — confirm at call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Answer pending SET_POWERED commands with the current settings */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 == all) */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* If a non-zero class of device was set, report it as cleared */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp held a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
8372
8373 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8374 {
8375 struct mgmt_pending_cmd *cmd;
8376 u8 status;
8377
8378 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8379 if (!cmd)
8380 return;
8381
8382 if (err == -ERFKILL)
8383 status = MGMT_STATUS_RFKILLED;
8384 else
8385 status = MGMT_STATUS_FAILED;
8386
8387 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8388
8389 mgmt_pending_remove(cmd);
8390 }
8391
8392 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8393 bool persistent)
8394 {
8395 struct mgmt_ev_new_link_key ev;
8396
8397 memset(&ev, 0, sizeof(ev));
8398
8399 ev.store_hint = persistent;
8400 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8401 ev.key.addr.type = BDADDR_BREDR;
8402 ev.key.type = key->type;
8403 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8404 ev.key.pin_len = key->pin_len;
8405
8406 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8407 }
8408
8409 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8410 {
8411 switch (ltk->type) {
8412 case SMP_LTK:
8413 case SMP_LTK_SLAVE:
8414 if (ltk->authenticated)
8415 return MGMT_LTK_AUTHENTICATED;
8416 return MGMT_LTK_UNAUTHENTICATED;
8417 case SMP_LTK_P256:
8418 if (ltk->authenticated)
8419 return MGMT_LTK_P256_AUTH;
8420 return MGMT_LTK_P256_UNAUTH;
8421 case SMP_LTK_P256_DEBUG:
8422 return MGMT_LTK_P256_DEBUG;
8423 }
8424
8425 return MGMT_LTK_UNAUTHENTICATED;
8426 }
8427
/* Send a New Long Term Key event for an LE LTK. @persistent advises
 * userspace whether to store the key; it is forced off for random
 * addresses that are not static (see comment below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key used when acting as master */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8470
8471 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8472 {
8473 struct mgmt_ev_new_irk ev;
8474
8475 memset(&ev, 0, sizeof(ev));
8476
8477 ev.store_hint = persistent;
8478
8479 bacpy(&ev.rpa, &irk->rpa);
8480 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8481 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8482 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8483
8484 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8485 }
8486
8487 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8488 bool persistent)
8489 {
8490 struct mgmt_ev_new_csrk ev;
8491
8492 memset(&ev, 0, sizeof(ev));
8493
8494 /* Devices using resolvable or non-resolvable random addresses
8495 * without providing an identity resolving key don't require
8496 * to store signature resolving keys. Their addresses will change
8497 * the next time around.
8498 *
8499 * Only when a remote device provides an identity address
8500 * make sure the signature resolving key is stored. So allow
8501 * static random and public addresses here.
8502 */
8503 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8504 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
8505 ev.store_hint = 0x00;
8506 else
8507 ev.store_hint = persistent;
8508
8509 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
8510 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
8511 ev.key.type = csrk->type;
8512 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
8513
8514 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
8515 }
8516
8517 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8518 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8519 u16 max_interval, u16 latency, u16 timeout)
8520 {
8521 struct mgmt_ev_new_conn_param ev;
8522
8523 if (!hci_is_identity_address(bdaddr, bdaddr_type))
8524 return;
8525
8526 memset(&ev, 0, sizeof(ev));
8527 bacpy(&ev.addr.bdaddr, bdaddr);
8528 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8529 ev.store_hint = store_hint;
8530 ev.min_interval = cpu_to_le16(min_interval);
8531 ev.max_interval = cpu_to_le16(max_interval);
8532 ev.latency = cpu_to_le16(latency);
8533 ev.timeout = cpu_to_le16(timeout);
8534
8535 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8536 }
8537
/* Send a Device Connected event, embedding either the connection's LE
 * advertising data or (for BR/EDR) the remote name and class of device
 * as EIR fields.
 *
 * NOTE(review): ev + EIR data are built in a fixed 512-byte stack buffer;
 * assumes le_adv_data_len/name_len keep the total within bounds — confirm
 * at callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR path: append remote name and class of device */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
8574
8575 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8576 {
8577 struct sock **sk = data;
8578
8579 cmd->cmd_complete(cmd, 0);
8580
8581 *sk = cmd->sk;
8582 sock_hold(*sk);
8583
8584 mgmt_pending_remove(cmd);
8585 }
8586
/* mgmt_pending_foreach() callback: emit a Device Unpaired event for the
 * address in the pending UNPAIR_DEVICE command, then complete and remove
 * the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8597
8598 bool mgmt_powering_down(struct hci_dev *hdev)
8599 {
8600 struct mgmt_pending_cmd *cmd;
8601 struct mgmt_mode *cp;
8602
8603 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8604 if (!cmd)
8605 return false;
8606
8607 cp = cmd->param;
8608 if (!cp->val)
8609 return true;
8610
8611 return false;
8612 }
8613
/* Send a Device Disconnected event and answer any pending DISCONNECT /
 * UNPAIR_DEVICE commands for this link. Also expedites a queued power-off
 * once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only announce connections that were reported to userspace */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending DISCONNECT commands; sk gets the first socket */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp() */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8653
8654 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8655 u8 link_type, u8 addr_type, u8 status)
8656 {
8657 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8658 struct mgmt_cp_disconnect *cp;
8659 struct mgmt_pending_cmd *cmd;
8660
8661 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8662 hdev);
8663
8664 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8665 if (!cmd)
8666 return;
8667
8668 cp = cmd->param;
8669
8670 if (bacmp(bdaddr, &cp->addr.bdaddr))
8671 return;
8672
8673 if (cp->addr.type != bdaddr_type)
8674 return;
8675
8676 cmd->cmd_complete(cmd, mgmt_status(status));
8677 mgmt_pending_remove(cmd);
8678 }
8679
8680 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8681 u8 addr_type, u8 status)
8682 {
8683 struct mgmt_ev_connect_failed ev;
8684
8685 /* The connection is still in hci_conn_hash so test for 1
8686 * instead of 0 to know if this is the last one.
8687 */
8688 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8689 cancel_delayed_work(&hdev->power_off);
8690 queue_work(hdev->req_workqueue, &hdev->power_off.work);
8691 }
8692
8693 bacpy(&ev.addr.bdaddr, bdaddr);
8694 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8695 ev.status = mgmt_status(status);
8696
8697 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8698 }
8699
8700 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8701 {
8702 struct mgmt_ev_pin_code_request ev;
8703
8704 bacpy(&ev.addr.bdaddr, bdaddr);
8705 ev.addr.type = BDADDR_BREDR;
8706 ev.secure = secure;
8707
8708 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8709 }
8710
8711 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8712 u8 status)
8713 {
8714 struct mgmt_pending_cmd *cmd;
8715
8716 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8717 if (!cmd)
8718 return;
8719
8720 cmd->cmd_complete(cmd, mgmt_status(status));
8721 mgmt_pending_remove(cmd);
8722 }
8723
8724 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8725 u8 status)
8726 {
8727 struct mgmt_pending_cmd *cmd;
8728
8729 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8730 if (!cmd)
8731 return;
8732
8733 cmd->cmd_complete(cmd, mgmt_status(status));
8734 mgmt_pending_remove(cmd);
8735 }
8736
8737 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8738 u8 link_type, u8 addr_type, u32 value,
8739 u8 confirm_hint)
8740 {
8741 struct mgmt_ev_user_confirm_request ev;
8742
8743 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8744
8745 bacpy(&ev.addr.bdaddr, bdaddr);
8746 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8747 ev.confirm_hint = confirm_hint;
8748 ev.value = cpu_to_le32(value);
8749
8750 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8751 NULL);
8752 }
8753
8754 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8755 u8 link_type, u8 addr_type)
8756 {
8757 struct mgmt_ev_user_passkey_request ev;
8758
8759 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8760
8761 bacpy(&ev.addr.bdaddr, bdaddr);
8762 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8763
8764 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8765 NULL);
8766 }
8767
8768 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8769 u8 link_type, u8 addr_type, u8 status,
8770 u8 opcode)
8771 {
8772 struct mgmt_pending_cmd *cmd;
8773
8774 cmd = pending_find(opcode, hdev);
8775 if (!cmd)
8776 return -ENOENT;
8777
8778 cmd->cmd_complete(cmd, mgmt_status(status));
8779 mgmt_pending_remove(cmd);
8780
8781 return 0;
8782 }
8783
/* Complete a pending USER_CONFIRM_REPLY command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8790
/* Complete a pending USER_CONFIRM_NEG_REPLY command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8798
/* Complete a pending USER_PASSKEY_REPLY command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8805
/* Complete a pending USER_PASSKEY_NEG_REPLY command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8813
8814 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8815 u8 link_type, u8 addr_type, u32 passkey,
8816 u8 entered)
8817 {
8818 struct mgmt_ev_passkey_notify ev;
8819
8820 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8821
8822 bacpy(&ev.addr.bdaddr, bdaddr);
8823 ev.addr.type = link_to_bdaddr(link_type, addr_type);
8824 ev.passkey = __cpu_to_le32(passkey);
8825 ev.entered = entered;
8826
8827 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8828 }
8829
/* Send an Authentication Failed event for @conn and, if a pairing command
 * is pending for this connection, complete and remove it. The event is
 * skipped for the socket that issued the pairing command.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator gets the command response instead */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8850
/* Completion handler for link-security (authentication) enable/disable.
 * Syncs the HCI_LINK_SECURITY flag with the controller's HCI_AUTH state,
 * answers pending SET_LINK_SECURITY commands and broadcasts new settings
 * when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending SET_LINK_SECURITY commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp held a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
8877
8878 static void clear_eir(struct hci_request *req)
8879 {
8880 struct hci_dev *hdev = req->hdev;
8881 struct hci_cp_write_eir cp;
8882
8883 if (!lmp_ext_inq_capable(hdev))
8884 return;
8885
8886 memset(hdev->eir, 0, sizeof(hdev->eir));
8887
8888 memset(&cp, 0, sizeof(cp));
8889
8890 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8891 }
8892
/* Completion handler for Simple Secure Pairing enable/disable. Updates
 * the HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, answers
 * pending SET_SSP commands, broadcasts new settings on change, and
 * refreshes or clears the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* An enable attempt failed: roll the flags back and let
		 * userspace know the settings reverted.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also forces High Speed off; "changed" must
		 * be true if either flag actually flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, (re)write EIR (and debug-key mode if in use);
	 * with SSP off, clear the EIR data.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8945
8946 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8947 {
8948 struct cmd_lookup *match = data;
8949
8950 if (match->sk == NULL) {
8951 match->sk = cmd->sk;
8952 sock_hold(match->sk);
8953 }
8954 }
8955
8956 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
8957 u8 status)
8958 {
8959 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
8960
8961 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
8962 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
8963 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
8964
8965 if (!status) {
8966 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
8967 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8968 ext_info_changed(hdev, NULL);
8969 }
8970
8971 if (match.sk)
8972 sock_put(match.sk);
8973 }
8974
/* Completion handler for a local name change.
 *
 * On success, builds a Local Name Changed event carrying both the full
 * and the short name and emits it to everyone except the socket that
 * requested the change (if any).  Failures are silently ignored.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command, so the change did not come
		 * through mgmt; store the controller-provided name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9002
9003 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9004 {
9005 int i;
9006
9007 for (i = 0; i < uuid_count; i++) {
9008 if (!memcmp(uuid, uuids[i], 16))
9009 return true;
9010 }
9011
9012 return false;
9013 }
9014
/* Scan an EIR/advertising data blob for any UUID contained in @uuids.
 *
 * Walks the length-prefixed EIR fields and, for 16- and 32-bit UUID
 * fields, expands each entry into a full 128-bit UUID using the
 * Bluetooth base UUID before comparing.  Returns true on the first
 * match, false if the blob is exhausted or malformed.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Field (type byte + data) must fit in the remainder */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte entries; +3 guards the 2 data bytes past
			 * the len/type header
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte entries expanded over the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 16-byte UUIDs, copied verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (len byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9069
9070 static void restart_le_scan(struct hci_dev *hdev)
9071 {
9072 /* If controller is not scanning we are done. */
9073 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9074 return;
9075
9076 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9077 hdev->discovery.scan_start +
9078 hdev->discovery.scan_duration))
9079 return;
9080
9081 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9082 DISCOV_LE_RESTART_DELAY);
9083 }
9084
9085 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9086 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9087 {
9088 /* If a RSSI threshold has been specified, and
9089 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9090 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9091 * is set, let it through for further processing, as we might need to
9092 * restart the scan.
9093 *
9094 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9095 * the results are also dropped.
9096 */
9097 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9098 (rssi == HCI_RSSI_INVALID ||
9099 (rssi < hdev->discovery.rssi &&
9100 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9101 return false;
9102
9103 if (hdev->discovery.uuid_count != 0) {
9104 /* If a list of UUIDs is provided in filter, results with no
9105 * matching UUID should be dropped.
9106 */
9107 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9108 hdev->discovery.uuids) &&
9109 !eir_has_uuids(scan_rsp, scan_rsp_len,
9110 hdev->discovery.uuid_count,
9111 hdev->discovery.uuids))
9112 return false;
9113 }
9114
9115 /* If duplicate filtering does not report RSSI changes, then restart
9116 * scanning to ensure updated result with updated RSSI values.
9117 */
9118 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9119 restart_le_scan(hdev);
9120
9121 /* Validate RSSI value against the RSSI threshold once more. */
9122 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9123 rssi < hdev->discovery.rssi)
9124 return false;
9125 }
9126
9127 return true;
9128 }
9129
/* Report a discovered device to userspace as a Device Found event.
 *
 * Applies the active discovery filters (RSSI/UUID service filter,
 * limited-discoverable check), then assembles a single event buffer
 * containing EIR data, an optional appended Class of Device field and
 * the scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* Bit 5 of the minor service class = limited mode */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only if the EIR data does not
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9214
9215 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9216 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9217 {
9218 struct mgmt_ev_device_found *ev;
9219 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9220 u16 eir_len;
9221
9222 ev = (struct mgmt_ev_device_found *) buf;
9223
9224 memset(buf, 0, sizeof(buf));
9225
9226 bacpy(&ev->addr.bdaddr, bdaddr);
9227 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9228 ev->rssi = rssi;
9229
9230 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9231 name_len);
9232
9233 ev->eir_len = cpu_to_le16(eir_len);
9234
9235 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9236 }
9237
9238 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9239 {
9240 struct mgmt_ev_discovering ev;
9241
9242 bt_dev_dbg(hdev, "discovering %u", discovering);
9243
9244 memset(&ev, 0, sizeof(ev));
9245 ev.type = hdev->discovery.type;
9246 ev.discovering = discovering;
9247
9248 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9249 }
9250
9251 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9252 {
9253 struct mgmt_ev_controller_suspend ev;
9254
9255 ev.suspend_state = state;
9256 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9257 }
9258
9259 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9260 u8 addr_type)
9261 {
9262 struct mgmt_ev_controller_resume ev;
9263
9264 ev.wake_reason = reason;
9265 if (bdaddr) {
9266 bacpy(&ev.addr.bdaddr, bdaddr);
9267 ev.addr.type = addr_type;
9268 } else {
9269 memset(&ev.addr, 0, sizeof(ev.addr));
9270 }
9271
9272 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9273 }
9274
/* Control-channel descriptor registered with the HCI socket layer:
 * command dispatch table plus per-hdev initialization hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9281
/* Register the management control channel; returns 0 or a negative
 * error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9286
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}