]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Add framework for Extended Controller Information
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 13
42
/* Management opcodes reported to trusted sockets by Read Commands.
 * Order matters only for the reply payload; new opcodes are appended.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
};
109
/* Management events delivered to trusted sockets; reported by
 * Read Commands alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
147
/* Subset of opcodes allowed on untrusted (non-privileged) sockets:
 * read-only information commands only.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
156
/* Subset of events delivered to untrusted sockets: index and
 * non-sensitive state-change notifications only.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
170
171 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
172
173 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
174 "\x00\x00\x00\x00\x00\x00\x00\x00"
175
176 /* HCI to MGMT error code conversion table */
177 static u8 mgmt_status_table[] = {
178 MGMT_STATUS_SUCCESS,
179 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
180 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
181 MGMT_STATUS_FAILED, /* Hardware Failure */
182 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
183 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
184 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
185 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
186 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
187 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
188 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
189 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
190 MGMT_STATUS_BUSY, /* Command Disallowed */
191 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
192 MGMT_STATUS_REJECTED, /* Rejected Security */
193 MGMT_STATUS_REJECTED, /* Rejected Personal */
194 MGMT_STATUS_TIMEOUT, /* Host Timeout */
195 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
196 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
197 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
198 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
199 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
200 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
201 MGMT_STATUS_BUSY, /* Repeated Attempts */
202 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
203 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
205 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
206 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
207 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
208 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
209 MGMT_STATUS_FAILED, /* Unspecified Error */
210 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
211 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
212 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
213 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
214 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
215 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
216 MGMT_STATUS_FAILED, /* Unit Link Key Used */
217 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
218 MGMT_STATUS_TIMEOUT, /* Instant Passed */
219 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
220 MGMT_STATUS_FAILED, /* Transaction Collision */
221 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
222 MGMT_STATUS_REJECTED, /* QoS Rejected */
223 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
224 MGMT_STATUS_REJECTED, /* Insufficient Security */
225 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
226 MGMT_STATUS_BUSY, /* Role Switch Pending */
227 MGMT_STATUS_FAILED, /* Slot Violation */
228 MGMT_STATUS_FAILED, /* Role Switch Failed */
229 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
230 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
231 MGMT_STATUS_BUSY, /* Host Busy Pairing */
232 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
233 MGMT_STATUS_BUSY, /* Controller Busy */
234 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
235 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
236 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
237 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
238 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
239 };
240
241 static u8 mgmt_status(u8 hci_status)
242 {
243 if (hci_status < ARRAY_SIZE(mgmt_status_table))
244 return mgmt_status_table[hci_status];
245
246 return MGMT_STATUS_FAILED;
247 }
248
/* Send an index-related event on the control channel, restricted to
 * sockets that have @flag set. No socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
255
/* Send an event on the control channel to sockets with @flag set,
 * skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
262
/* Send an event on the control channel to trusted sockets only,
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
269
270 static u8 le_addr_type(u8 mgmt_addr_type)
271 {
272 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
273 return ADDR_LE_DEV_PUBLIC;
274 else
275 return ADDR_LE_DEV_RANDOM;
276 }
277
/* Fill a mgmt_rp_read_version structure with the compile-time
 * MGMT interface version/revision. @ver must point to a buffer of at
 * least sizeof(struct mgmt_rp_read_version) bytes.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
285
/* Handler for MGMT_OP_READ_VERSION: reply with the interface version.
 * Works without a controller index (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
298
299 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
300 u16 data_len)
301 {
302 struct mgmt_rp_read_commands *rp;
303 u16 num_commands, num_events;
304 size_t rp_size;
305 int i, err;
306
307 BT_DBG("sock %p", sk);
308
309 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
310 num_commands = ARRAY_SIZE(mgmt_commands);
311 num_events = ARRAY_SIZE(mgmt_events);
312 } else {
313 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
314 num_events = ARRAY_SIZE(mgmt_untrusted_events);
315 }
316
317 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
318
319 rp = kmalloc(rp_size, GFP_KERNEL);
320 if (!rp)
321 return -ENOMEM;
322
323 rp->num_commands = cpu_to_le16(num_commands);
324 rp->num_events = cpu_to_le16(num_events);
325
326 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
327 __le16 *opcode = rp->opcodes;
328
329 for (i = 0; i < num_commands; i++, opcode++)
330 put_unaligned_le16(mgmt_commands[i], opcode);
331
332 for (i = 0; i < num_events; i++, opcode++)
333 put_unaligned_le16(mgmt_events[i], opcode);
334 } else {
335 __le16 *opcode = rp->opcodes;
336
337 for (i = 0; i < num_commands; i++, opcode++)
338 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
339
340 for (i = 0; i < num_events; i++, opcode++)
341 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
342 }
343
344 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
345 rp, rp_size);
346 kfree(rp);
347
348 return err;
349 }
350
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Runs under hci_dev_list_lock, hence
 * the GFP_ATOMIC allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used only to
	 * size the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, applying additional filters
	 * (setup in progress, user channel, raw-only), so the final count
	 * may be smaller than the first pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
410
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reports primary controllers still flagged HCI_UNCONFIGURED.
 * Runs under hci_dev_list_lock, hence the GFP_ATOMIC allocation.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the indexes, skipping devices in setup/config,
	 * in user-channel mode, or marked raw-only.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
470
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers, each entry tagged with a type (0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP) and its bus.
 * Calling this once switches the socket to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the entries, skipping devices in setup/config,
	 * in user-channel mode, or marked raw-only.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual entry count */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546
547 static bool is_configured(struct hci_dev *hdev)
548 {
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
550 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
551 return false;
552
553 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
554 !bacmp(&hdev->public_addr, BDADDR_ANY))
555 return false;
556
557 return true;
558 }
559
560 static __le32 get_missing_options(struct hci_dev *hdev)
561 {
562 u32 options = 0;
563
564 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
565 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
566 options |= MGMT_OPTION_EXTERNAL_CONFIG;
567
568 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
569 !bacmp(&hdev->public_addr, BDADDR_ANY))
570 options |= MGMT_OPTION_PUBLIC_ADDRESS;
571
572 return cpu_to_le32(options);
573 }
574
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing options,
 * to sockets that opted into option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
582
/* Complete @opcode for @sk with the current missing-options bitmask
 * as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
590
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with manufacturer,
 * the configuration options this controller supports, and those still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
618
619 static u32 get_supported_settings(struct hci_dev *hdev)
620 {
621 u32 settings = 0;
622
623 settings |= MGMT_SETTING_POWERED;
624 settings |= MGMT_SETTING_BONDABLE;
625 settings |= MGMT_SETTING_DEBUG_KEYS;
626 settings |= MGMT_SETTING_CONNECTABLE;
627 settings |= MGMT_SETTING_DISCOVERABLE;
628
629 if (lmp_bredr_capable(hdev)) {
630 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
631 settings |= MGMT_SETTING_FAST_CONNECTABLE;
632 settings |= MGMT_SETTING_BREDR;
633 settings |= MGMT_SETTING_LINK_SECURITY;
634
635 if (lmp_ssp_capable(hdev)) {
636 settings |= MGMT_SETTING_SSP;
637 settings |= MGMT_SETTING_HS;
638 }
639
640 if (lmp_sc_capable(hdev))
641 settings |= MGMT_SETTING_SECURE_CONN;
642 }
643
644 if (lmp_le_capable(hdev)) {
645 settings |= MGMT_SETTING_LE;
646 settings |= MGMT_SETTING_ADVERTISING;
647 settings |= MGMT_SETTING_SECURE_CONN;
648 settings |= MGMT_SETTING_PRIVACY;
649 settings |= MGMT_SETTING_STATIC_ADDRESS;
650 }
651
652 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
653 hdev->set_bdaddr)
654 settings |= MGMT_SETTING_CONFIGURATION;
655
656 return settings;
657 }
658
/* Build the bitmask of currently-active settings from the hdev flags.
 * Mirrors get_supported_settings() but reflects live state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
726
/* Look up a pending mgmt command by opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
731
/* Look up a pending mgmt command by opcode and user data pointer on the
 * control channel.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
738
739 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
740 {
741 struct mgmt_pending_cmd *cmd;
742
743 /* If there's a pending mgmt command the flags will not yet have
744 * their final values, so check for this first.
745 */
746 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
747 if (cmd) {
748 struct mgmt_mode *cp = cmd->param;
749 if (cp->val == 0x01)
750 return LE_AD_GENERAL;
751 else if (cp->val == 0x02)
752 return LE_AD_LIMITED;
753 } else {
754 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
755 return LE_AD_LIMITED;
756 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
757 return LE_AD_GENERAL;
758 }
759
760 return 0;
761 }
762
763 bool mgmt_get_connectable(struct hci_dev *hdev)
764 {
765 struct mgmt_pending_cmd *cmd;
766
767 /* If there's a pending mgmt command the flag will not yet have
768 * it's final value, so check for this first.
769 */
770 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
771 if (cmd) {
772 struct mgmt_mode *cp = cmd->param;
773
774 return cp->val;
775 }
776
777 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
778 }
779
/* Delayed work: when the service cache timer fires, clear
 * HCI_SERVICE_CACHE and push the real EIR and class of device to the
 * controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
800
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is active, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
822
/* One-time per-hdev mgmt initialization, triggered on first mgmt use.
 * HCI_MGMT acts as the "already initialized" latch.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
838
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, settings, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
868
/* Handler for MGMT_OP_READ_EXT_INFO: extended variant of Read Info.
 * The EIR portion is empty (eir_len = 0) at this stage of the
 * framework. Calling this once switches the socket from the class of
 * device / local name events to the extended info changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	rp.eir_len = cpu_to_le16(0);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, &rp,
				 sizeof(rp));
}
904
/* Emit MGMT_EV_EXT_INFO_CHANGED (currently with an empty EIR payload)
 * to sockets that opted into extended info events, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_ext_info_changed ev;

	ev.eir_len = cpu_to_le16(0);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, &ev,
				  sizeof(ev), HCI_MGMT_EXT_INFO_EVENTS, skip);
}
914
/* Complete @opcode for @sk with the current settings bitmask as the
 * reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
922
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for
 * the delayed timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
932
/* Emit MGMT_EV_ADVERTISING_ADDED for @instance to trusted sockets,
 * skipping the originating socket @sk.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
941
/* Emit MGMT_EV_ADVERTISING_REMOVED for @instance to trusted sockets,
 * skipping the originating socket @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
951
/* Cancel a pending advertising-instance expiry, if one is armed.
 * Clearing adv_instance_timeout first marks the timeout as inactive.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
959
/* Build and run one HCI request that tears down active state before
 * powering off: disable page/inquiry scan, remove advertising
 * instances, stop discovery and abort every connection.
 * Returns the hci_req_run() result; -ENODATA means nothing was queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
993
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * Rejects values other than 0x00/0x01, refuses to run concurrently
 * with another Set Powered, and short-circuits if the requested state
 * already matches. Power-off first cleans up active HCI state.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1048
/* Emit MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * sockets that opted into setting events, skipping @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1056
/* Public wrapper: broadcast a New Settings event to all subscribers. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1061
/* Context passed through mgmt_pending iteration callbacks: collects the
 * first responder socket (sk) and carries the hdev and a status code.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1067
/* Pending-command iterator: answer @cmd with the current settings,
 * remember the first socket seen in @data (a struct cmd_lookup, with a
 * hold on the socket), and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1083
/* Pending-command iterator: fail @cmd with the status pointed to by
 * @data and remove the pending entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1091
1092 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1093 {
1094 if (cmd->cmd_complete) {
1095 u8 *status = data;
1096
1097 cmd->cmd_complete(cmd, *status);
1098 mgmt_pending_remove(cmd);
1099
1100 return;
1101 }
1102
1103 cmd_status_rsp(cmd, data);
1104 }
1105
1106 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1107 {
1108 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1109 cmd->param, cmd->param_len);
1110 }
1111
1112 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1113 {
1114 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1115 cmd->param, sizeof(struct mgmt_addr_info));
1116 }
1117
1118 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1119 {
1120 if (!lmp_bredr_capable(hdev))
1121 return MGMT_STATUS_NOT_SUPPORTED;
1122 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1123 return MGMT_STATUS_REJECTED;
1124 else
1125 return MGMT_STATUS_SUCCESS;
1126 }
1127
1128 static u8 mgmt_le_support(struct hci_dev *hdev)
1129 {
1130 if (!lmp_le_capable(hdev))
1131 return MGMT_STATUS_NOT_SUPPORTED;
1132 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1133 return MGMT_STATUS_REJECTED;
1134 else
1135 return MGMT_STATUS_SUCCESS;
1136 }
1137
/* Called when the HCI request behind Set Discoverable finishes.  Responds
 * to the pending command's socket; on success also arms the discoverable
 * timeout (if one was requested) and broadcasts New Settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timer that turns discoverable mode off again; the
	 * timeout is stored in seconds and converted to jiffies here.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1172
/* Set Discoverable command handler.  cp->val: 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable; cp->timeout is in seconds.
 * When powered off only the setting flag is toggled and the response is
 * sent immediately; when powered, the HCI work is deferred to
 * hdev->discoverable_update and the response comes from
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running timer, which needs a powered device */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1296
/* Called when the HCI request behind Set Connectable finishes.  Responds
 * to the pending command's socket and broadcasts New Settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1324
1325 static int set_connectable_update_settings(struct hci_dev *hdev,
1326 struct sock *sk, u8 val)
1327 {
1328 bool changed = false;
1329 int err;
1330
1331 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1332 changed = true;
1333
1334 if (val) {
1335 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1336 } else {
1337 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1338 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1339 }
1340
1341 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1342 if (err < 0)
1343 return err;
1344
1345 if (changed) {
1346 hci_req_update_scan(hdev);
1347 hci_update_background_scan(hdev);
1348 return new_settings(hdev, sk);
1349 }
1350
1351 return 0;
1352 }
1353
/* Set Connectable command handler.  cp->val: 0x00 = off, 0x01 = on.
 * Powered-off devices take the flag-only path in
 * set_connectable_update_settings(); otherwise the HCI work is deferred
 * to hdev->connectable_update and the response is sent from
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so stop a pending discoverable timeout as well.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1410
/* Set Bondable command handler.  Pure host-side flag; no HCI command is
 * needed, but a discoverable update may be queued in limited privacy
 * mode since bondable affects the local advertising address.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear report whether the flag actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1453
/* Set Link Security command handler (BR/EDR authentication enable).
 * Powered-off devices only toggle the HCI_LINK_SECURITY flag; powered
 * devices send HCI Write Auth Enable, and the response is completed from
 * the corresponding HCI event handler via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested auth state: no HCI command needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1522
/* Set Secure Simple Pairing command handler.  Disabling SSP also clears
 * High Speed support (HS depends on SSP).  Powered-off devices only
 * toggle flags; powered devices send HCI Write SSP Mode (and turn off
 * SSP debug mode first when it was in use).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP implies disabling HS; "changed" is
			 * true when either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When turning SSP off while debug keys are in use, also switch
	 * SSP debug mode off (cp->val is 0x00 here).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1603
/* Set High Speed command handler.  HS is a host-side flag requiring SSP;
 * it can be enabled at any time but only disabled while powered off
 * (disabling while powered is rejected).
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight Set SSP could change the SSP state underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1660
/* HCI request callback for Set LE.  Answers all pending Set LE commands
 * (failing them on error) and, when LE ended up enabled, refreshes the
 * advertising/scan-response data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp() records the first answered socket in match.sk
	 * (with a reference) so New Settings can skip it below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1700
/* Set Low Energy command handler.  LE-only configurations cannot switch
 * LE off.  Flag-only path when powered off or when the controller's host
 * LE support already matches; otherwise sends HCI Write LE Host Supported
 * (disabling advertising first when switching LE off) and completes via
 * le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Switching LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay enabled without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1804
1805 /* This is a helper function to test for pending mgmt commands that can
1806 * cause CoD or EIR HCI commands. We can only allow one such pending
1807 * mgmt command at a time since otherwise we cannot easily track what
1808 * the current values are, will be, and based on that calculate if a new
1809 * HCI command needs to be sent and if yes with what value.
1810 */
1811 static bool pending_eir_or_class(struct hci_dev *hdev)
1812 {
1813 struct mgmt_pending_cmd *cmd;
1814
1815 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1816 switch (cmd->opcode) {
1817 case MGMT_OP_ADD_UUID:
1818 case MGMT_OP_REMOVE_UUID:
1819 case MGMT_OP_SET_DEV_CLASS:
1820 case MGMT_OP_SET_POWERED:
1821 return true;
1822 }
1823 }
1824
1825 return false;
1826 }
1827
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order.  The first 12 bytes are compared against a
 * 128-bit UUID to decide whether it can be shortened to 16/32 bits.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1832
1833 static u8 get_uuid_size(const u8 *uuid)
1834 {
1835 u32 val;
1836
1837 if (memcmp(uuid, bluetooth_base_uuid, 12))
1838 return 128;
1839
1840 val = get_unaligned_le32(&uuid[12]);
1841 if (val > 0xffff)
1842 return 32;
1843
1844 return 16;
1845 }
1846
1847 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1848 {
1849 struct mgmt_pending_cmd *cmd;
1850
1851 hci_dev_lock(hdev);
1852
1853 cmd = pending_find(mgmt_op, hdev);
1854 if (!cmd)
1855 goto unlock;
1856
1857 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
1858 mgmt_status(status), hdev->dev_class, 3);
1859
1860 mgmt_pending_remove(cmd);
1861
1862 unlock:
1863 hci_dev_unlock(hdev);
1864 }
1865
1866 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1867 {
1868 BT_DBG("status 0x%02x", status);
1869
1870 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1871 }
1872
/* Add UUID command handler.  Appends the UUID to hdev->uuids and queues
 * an HCI request to refresh the class of device and EIR data; -ENODATA
 * from hci_req_run() means no HCI command was actually needed, so the
 * response is sent immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work required: answer right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1930
1931 static bool enable_service_cache(struct hci_dev *hdev)
1932 {
1933 if (!hdev_is_powered(hdev))
1934 return false;
1935
1936 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1937 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1938 CACHE_TIMEOUT);
1939 return true;
1940 }
1941
1942 return false;
1943 }
1944
1945 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1946 {
1947 BT_DBG("status 0x%02x", status);
1948
1949 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1950 }
1951
/* Remove UUID command handler.  An all-zero UUID clears the whole list
 * (possibly just arming the service cache instead of touching HCI);
 * otherwise all matching entries are removed.  Like add_uuid(),
 * -ENODATA from hci_req_run() means no HCI command was needed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: drop every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work required: answer right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2030
2031 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2032 {
2033 BT_DBG("status 0x%02x", status);
2034
2035 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2036 }
2037
/* Set Device Class command handler.  Validates that the reserved bits of
 * minor/major class are zero, stores the new values, and (when powered)
 * pushes the updated class — flushing the service cache first if it was
 * active.  -ENODATA from hci_req_run() means no HCI command was needed.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the service-cache work
		 * to cancel, since that work itself takes hdev's lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work required: answer right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2108
/* Load Link Keys command handler.  Replaces the entire stored link-key
 * list with the keys supplied by userspace after validating the overall
 * length, the per-key address type/key type, and the debug-keys mode.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count that keeps expected_len within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate all keys before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2190
2191 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2192 u8 addr_type, struct sock *skip_sk)
2193 {
2194 struct mgmt_ev_device_unpaired ev;
2195
2196 bacpy(&ev.addr.bdaddr, bdaddr);
2197 ev.addr.type = addr_type;
2198
2199 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2200 skip_sk);
2201 }
2202
/* Unpair Device command handler.  Removes the stored keys for the given
 * address (link key for BR/EDR; IRK and LTK for LE) and, when
 * cp->disconnect is set, also terminates an existing connection.  The
 * response is sent immediately unless a disconnect is initiated, in
 * which case it comes from the pending command's complete handler.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	/* NOTE(review): the IRK/LTK are removed above before the pairing
	 * is cancelled here — confirm a pairing in progress cannot
	 * re-distribute keys in this window.
	 */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2333
/* Disconnect (mgmt command handler): terminate the BR/EDR or LE link
 * to the given remote address. The command stays pending until the
 * disconnect completes; only one Disconnect may be pending at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Pre-fill the response with the address so every reply path
	 * (including errors) can echo it back.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single Disconnect command may be in progress */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* BR/EDR connections are looked up via the ACL link type, LE
	 * connections via their LE address type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The pending command is resolved when the disconnect event
	 * arrives; drop it immediately if the HCI send already failed.
	 */
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2399
2400 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2401 {
2402 switch (link_type) {
2403 case LE_LINK:
2404 switch (addr_type) {
2405 case ADDR_LE_DEV_PUBLIC:
2406 return BDADDR_LE_PUBLIC;
2407
2408 default:
2409 /* Fallback to LE Random address type */
2410 return BDADDR_LE_RANDOM;
2411 }
2412
2413 default:
2414 /* Fallback to BR/EDR type */
2415 return BDADDR_BREDR;
2416 }
2417 }
2418
2419 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2420 u16 data_len)
2421 {
2422 struct mgmt_rp_get_connections *rp;
2423 struct hci_conn *c;
2424 size_t rp_len;
2425 int err;
2426 u16 i;
2427
2428 BT_DBG("");
2429
2430 hci_dev_lock(hdev);
2431
2432 if (!hdev_is_powered(hdev)) {
2433 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2434 MGMT_STATUS_NOT_POWERED);
2435 goto unlock;
2436 }
2437
2438 i = 0;
2439 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2440 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2441 i++;
2442 }
2443
2444 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2445 rp = kmalloc(rp_len, GFP_KERNEL);
2446 if (!rp) {
2447 err = -ENOMEM;
2448 goto unlock;
2449 }
2450
2451 i = 0;
2452 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2453 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2454 continue;
2455 bacpy(&rp->addr[i].bdaddr, &c->dst);
2456 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2457 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2458 continue;
2459 i++;
2460 }
2461
2462 rp->conn_count = cpu_to_le16(i);
2463
2464 /* Recalculate length in case of filtered SCO connections, etc */
2465 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2466
2467 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2468 rp_len);
2469
2470 kfree(rp);
2471
2472 unlock:
2473 hci_dev_unlock(hdev);
2474 return err;
2475 }
2476
2477 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2478 struct mgmt_cp_pin_code_neg_reply *cp)
2479 {
2480 struct mgmt_pending_cmd *cmd;
2481 int err;
2482
2483 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2484 sizeof(*cp));
2485 if (!cmd)
2486 return -ENOMEM;
2487
2488 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2489 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2490 if (err < 0)
2491 mgmt_pending_remove(cmd);
2492
2493 return err;
2494 }
2495
/* PIN Code Reply (mgmt command handler): forward a user supplied PIN
 * code to the controller for an ongoing BR/EDR pairing. If the
 * pending security level is high the PIN must be a full 16 bytes,
 * otherwise a negative reply is sent to the controller instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN: send a negative reply
	 * to the controller and report invalid parameters back, unless
	 * the negative reply itself failed (then report that error).
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* The pending command is resolved when the controller responds */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2557
2558 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2559 u16 len)
2560 {
2561 struct mgmt_cp_set_io_capability *cp = data;
2562
2563 BT_DBG("");
2564
2565 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2567 MGMT_STATUS_INVALID_PARAMS);
2568
2569 hci_dev_lock(hdev);
2570
2571 hdev->io_capability = cp->io_capability;
2572
2573 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2574 hdev->io_capability);
2575
2576 hci_dev_unlock(hdev);
2577
2578 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2579 NULL, 0);
2580 }
2581
2582 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2583 {
2584 struct hci_dev *hdev = conn->hdev;
2585 struct mgmt_pending_cmd *cmd;
2586
2587 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2588 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2589 continue;
2590
2591 if (cmd->user_data != conn)
2592 continue;
2593
2594 return cmd;
2595 }
2596
2597 return NULL;
2598 }
2599
/* Resolve a pending Pair Device command with the given status and
 * release the command's hold on the connection. The callback clears,
 * hci_conn_drop() and hci_conn_put() must stay in this order.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken with hci_conn_get() when the
	 * command was created (see pair_device()).
	 */
	hci_conn_put(conn);

	return err;
}
2628
2629 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2630 {
2631 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2632 struct mgmt_pending_cmd *cmd;
2633
2634 cmd = find_pairing(conn);
2635 if (cmd) {
2636 cmd->cmd_complete(cmd, status);
2637 mgmt_pending_remove(cmd);
2638 }
2639 }
2640
2641 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2642 {
2643 struct mgmt_pending_cmd *cmd;
2644
2645 BT_DBG("status %u", status);
2646
2647 cmd = find_pairing(conn);
2648 if (!cmd) {
2649 BT_DBG("Unable to find a pending command");
2650 return;
2651 }
2652
2653 cmd->cmd_complete(cmd, mgmt_status(status));
2654 mgmt_pending_remove(cmd);
2655 }
2656
2657 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2658 {
2659 struct mgmt_pending_cmd *cmd;
2660
2661 BT_DBG("status %u", status);
2662
2663 if (!status)
2664 return;
2665
2666 cmd = find_pairing(conn);
2667 if (!cmd) {
2668 BT_DBG("Unable to find a pending command");
2669 return;
2670 }
2671
2672 cmd->cmd_complete(cmd, mgmt_status(status));
2673 mgmt_pending_remove(cmd);
2674 }
2675
/* Pair Device (mgmt command handler): initiate pairing with a remote
 * device, creating the connection first if necessary. The command
 * stays pending and is resolved through the connection callbacks
 * (pairing_complete_cb / le_pairing_complete_cb) or mgmt_smp_complete.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Pre-fill the response address for all reply paths */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A callback already set means another pairing is in progress
	 * on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a connection reference for the pending command; it is
	 * released again in pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2806
/* Cancel Pair Device (mgmt command handler): abort the pending Pair
 * Device command for the given address, resolving it as cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing that is in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Resolve the pending Pair Device command as cancelled, then
	 * acknowledge the cancel command itself.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2849
/* Common helper for the user confirmation/passkey replies (and the
 * PIN code negative reply). LE replies are routed through SMP and
 * answered immediately; BR/EDR replies are forwarded as the given
 * HCI command and kept pending until the controller responds.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2920
/* PIN Code Negative Reply (mgmt command handler): reject an incoming
 * PIN code request; delegates to the common pairing response helper
 * (the passkey argument is unused for this opcode).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
2932
/* User Confirm Reply (mgmt command handler): accept a user
 * confirmation request. This command has a fixed size, so any other
 * length is rejected up front.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
2948
/* User Confirm Negative Reply (mgmt command handler): reject a user
 * confirmation request via the common pairing response helper.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
2960
/* User Passkey Reply (mgmt command handler): provide the passkey for
 * a passkey entry request; the only variant that forwards a passkey
 * value to the common helper.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
2972
/* User Passkey Negative Reply (mgmt command handler): reject a
 * passkey entry request via the common pairing response helper.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
2984
2985 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2986 {
2987 struct mgmt_cp_set_local_name *cp;
2988 struct mgmt_pending_cmd *cmd;
2989
2990 BT_DBG("status 0x%02x", status);
2991
2992 hci_dev_lock(hdev);
2993
2994 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2995 if (!cmd)
2996 goto unlock;
2997
2998 cp = cmd->param;
2999
3000 if (status)
3001 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3002 mgmt_status(status));
3003 else
3004 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3005 cp, sizeof(*cp));
3006
3007 mgmt_pending_remove(cmd);
3008
3009 unlock:
3010 hci_dev_unlock(hdev);
3011 }
3012
/* Set Local Name (mgmt command handler): update the device name and
 * short name. When powered, the controller's name, EIR and scan
 * response data are refreshed via an HCI request; otherwise the new
 * values are only stored and a local name changed event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify listeners even though no HCI traffic happens
		 * while the controller is powered off.
		 */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3082
/* Completion handler for the Read Local OOB (Extended) Data HCI
 * request: translate the controller's reply into a mgmt response for
 * the pending Read Local OOB Data command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* The legacy reply carries no P-256 values; trim them
		 * from the response size.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3141
/* Read Local OOB Data (mgmt command handler): ask the controller for
 * its local OOB pairing data. Uses the extended variant when BR/EDR
 * Secure Connections is enabled; the reply is handled by
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3192
/* Add Remote OOB Data (mgmt command handler): store out-of-band
 * pairing data received for a remote device. Two command sizes are
 * accepted: the legacy P-192 only form (BR/EDR only) and the extended
 * form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is only defined for BR/EDR */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3299
3300 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3301 void *data, u16 len)
3302 {
3303 struct mgmt_cp_remove_remote_oob_data *cp = data;
3304 u8 status;
3305 int err;
3306
3307 BT_DBG("%s", hdev->name);
3308
3309 if (cp->addr.type != BDADDR_BREDR)
3310 return mgmt_cmd_complete(sk, hdev->id,
3311 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3312 MGMT_STATUS_INVALID_PARAMS,
3313 &cp->addr, sizeof(cp->addr));
3314
3315 hci_dev_lock(hdev);
3316
3317 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3318 hci_remote_oob_data_clear(hdev);
3319 status = MGMT_STATUS_SUCCESS;
3320 goto done;
3321 }
3322
3323 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3324 if (err < 0)
3325 status = MGMT_STATUS_INVALID_PARAMS;
3326 else
3327 status = MGMT_STATUS_SUCCESS;
3328
3329 done:
3330 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3331 status, &cp->addr, sizeof(cp->addr));
3332
3333 hci_dev_unlock(hdev);
3334 return err;
3335 }
3336
3337 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3338 {
3339 struct mgmt_pending_cmd *cmd;
3340
3341 BT_DBG("status %d", status);
3342
3343 hci_dev_lock(hdev);
3344
3345 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3346 if (!cmd)
3347 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3348
3349 if (!cmd)
3350 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3351
3352 if (cmd) {
3353 cmd->cmd_complete(cmd, mgmt_status(status));
3354 mgmt_pending_remove(cmd);
3355 }
3356
3357 hci_dev_unlock(hdev);
3358 }
3359
3360 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3361 uint8_t *mgmt_status)
3362 {
3363 switch (type) {
3364 case DISCOV_TYPE_LE:
3365 *mgmt_status = mgmt_le_support(hdev);
3366 if (*mgmt_status)
3367 return false;
3368 break;
3369 case DISCOV_TYPE_INTERLEAVED:
3370 *mgmt_status = mgmt_le_support(hdev);
3371 if (*mgmt_status)
3372 return false;
3373 /* Intentional fall-through */
3374 case DISCOV_TYPE_BREDR:
3375 *mgmt_status = mgmt_bredr_support(hdev);
3376 if (*mgmt_status)
3377 return false;
3378 break;
3379 default:
3380 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3381 return false;
3382 }
3383
3384 return true;
3385 }
3386
/* Common handler for Start Discovery and Start Limited Discovery:
 * validate the request, record the discovery settings and kick off
 * the discovery state machine on the request workqueue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual work happens asynchronously via discov_update;
	 * the pending command is resolved from there.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3447
/* Start Discovery (mgmt command handler): regular device discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3454
/* Start Limited Discovery (mgmt command handler): discovery limited
 * to devices in the limited discoverable mode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3462
/* Completion callback for Start Service Discovery. Echoes back only the
 * first byte of the stored command parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3469
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like Start Discovery, but with result filtering: devices are only
 * reported if they match the given RSSI threshold and/or one of the
 * supplied 128-bit service UUIDs. The variable-length UUID list is
 * validated and copied into hdev->discovery before the discov_update
 * work is queued.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps expected_len below from overflowing u16;
	 * each UUID entry is 16 bytes.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery procedure may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The total command length must match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Freed via hci_discovery_filter_clear() */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3570
/* Called from the HCI core when a Stop Discovery operation finished.
 * Completes and removes the matching pending mgmt command, if any,
 * translating the HCI status into a mgmt status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
3587
/* Handler for MGMT_OP_STOP_DISCOVERY.
 *
 * Rejects the request if no discovery is active or if the supplied type
 * does not match the running discovery's type. Otherwise queues the
 * discov_update work to stop discovery; the command completes
 * asynchronously via mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Type must match the discovery that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3629
/* Handler for MGMT_OP_CONFIRM_NAME.
 *
 * User space tells us whether the name of a device found during
 * discovery is already known. Known names drop the entry from the
 * name-resolution list; unknown names are (re)queued for remote name
 * request via the inquiry cache.
 *
 * NOTE(review): cp->addr.type is not validated here; presumably only
 * BR/EDR entries exist in the inquiry cache, so the lookup by bdaddr
 * alone suffices -- confirm against hci_inquiry_cache_lookup_unknown().
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Confirm Name only makes sense while discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3671
/* Handler for MGMT_OP_BLOCK_DEVICE.
 *
 * Adds the given address to the controller's blacklist and emits a
 * Device Blocked event to all other mgmt sockets (the requester is
 * skipped via @sk). Fails with MGMT_STATUS_FAILED if the address is
 * already blacklisted.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3707
/* Handler for MGMT_OP_UNBLOCK_DEVICE.
 *
 * Removes the given address from the controller's blacklist and emits a
 * Device Unblocked event to all other mgmt sockets. A lookup failure
 * (address not blacklisted) is reported as MGMT_STATUS_INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3743
/* Handler for MGMT_OP_SET_DEVICE_ID.
 *
 * Stores the Device ID (DI) record values (source, vendor, product,
 * version) and refreshes the extended inquiry response so the new
 * record is exposed. Only source values 0x0000-0x0002 are accepted.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated EIR data to the controller; the result of the
	 * request is intentionally not waited for.
	 */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
3778
/* Completion callback for re-enabling instance advertising; the result
 * is only logged, no further action is required.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
3784
/* Completion callback for the Set Advertising HCI request.
 *
 * Syncs the HCI_ADVERTISING mgmt flag with the controller's HCI_LE_ADV
 * state, answers all pending Set Advertising commands, emits New
 * Settings, and - if Set Advertising was just turned off while
 * advertising instances exist - restarts multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance selected: fall back to the first one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
3847
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * cp->val: 0x00 = off, 0x01 = advertising on, 0x02 = advertising on
 * and connectable. When no HCI traffic is needed (controller off, no
 * effective change, LE connections up, or active LE scan running) only
 * the flags are toggled and the reply is sent directly; otherwise an
 * HCI request is issued and completed via set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while a conflicting operation is still in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3950
/* Handler for MGMT_OP_SET_STATIC_ADDRESS.
 *
 * Stores the LE static random address. Only allowed while the
 * controller is powered off. BDADDR_ANY clears the address; any other
 * value must be a valid static address (not BDADDR_NONE and with the
 * two most significant bits set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3994
/* Handler for MGMT_OP_SET_SCAN_PARAMS.
 *
 * Updates the LE scan interval and window. Both values must lie in
 * the range 0x0004-0x4000 and the window must not exceed the interval.
 * If a background (passive) scan is currently running it is restarted
 * so the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4051
/* Completion callback for the Set Fast Connectable HCI request.
 * On success the HCI_FAST_CONNECTABLE flag is synced with the requested
 * value and New Settings is emitted; on failure only a command status
 * is sent. Either way the pending command is removed.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* Requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4085
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Toggles fast-connectable page scan parameters. Requires BR/EDR and
 * at least Bluetooth 1.2. When the controller is powered off only the
 * flag is flipped; otherwise the page scan settings are written via an
 * HCI request completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: remember the setting for the next power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4150
/* Completion callback for the Set BR/EDR HCI request.
 * The HCI_BREDR_ENABLED flag was optimistically set before the request
 * was issued, so on failure it must be cleared again here.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4182
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller. Disabling while powered on is rejected, as is
 * re-enabling when a static address or Secure Connections would make
 * the resulting identity-address configuration invalid. When powered
 * off only flags are updated; when powered on an HCI request updates
 * page scan and advertising data, completing in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4294
/* Completion callback for the Set Secure Connections HCI request.
 * Syncs HCI_SC_ENABLED and HCI_SC_ONLY with the mode that was stored in
 * the pending command (0x00 = off, 0x01 = enabled, 0x02 = SC only) and
 * emits the new settings on success.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4339
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * cp->val: 0x00 = disable, 0x01 = enable Secure Connections,
 * 0x02 = Secure Connections Only mode. When the controller is off,
 * lacks BR/EDR SC support, or has BR/EDR disabled, only the host flags
 * are toggled; otherwise HCI_OP_WRITE_SC_SUPPORT is issued and the
 * flags are updated in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR Secure Connections requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI interaction possible or needed: just toggle the flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: confirm without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4427
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep debug keys and also actively use SSP debug mode. If the
 * "use" state changed while powered with SSP enabled, the controller's
 * SSP debug mode is updated immediately.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the new SSP debug mode to the controller if it changed */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4474
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * cp->privacy: 0x00 = off, 0x01 = privacy on (use RPAs),
 * 0x02 = limited privacy. Enabling stores the supplied IRK and marks
 * the current RPA expired; disabling wipes the IRK. Only allowed while
 * the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA with the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4529
4530 static bool irk_is_valid(struct mgmt_irk_info *irk)
4531 {
4532 switch (irk->addr.type) {
4533 case BDADDR_LE_PUBLIC:
4534 return true;
4535
4536 case BDADDR_LE_RANDOM:
4537 /* Two most significant bits shall be set */
4538 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4539 return false;
4540 return true;
4541 }
4542
4543 return false;
4544 }
4545
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the kernel's entire list of Identity Resolving Keys with
 * the list supplied by user space. The variable-length key list is
 * fully validated (count bound, total length, per-entry address) before
 * any existing IRK is dropped.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The total command length must match the declared key count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the existing list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4607
4608 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4609 {
4610 if (key->master != 0x00 && key->master != 0x01)
4611 return false;
4612
4613 switch (key->addr.type) {
4614 case BDADDR_LE_PUBLIC:
4615 return true;
4616
4617 case BDADDR_LE_RANDOM:
4618 /* Two most significant bits shall be set */
4619 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4620 return false;
4621 return true;
4622 }
4623
4624 return false;
4625 }
4626
4627 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4628 void *cp_data, u16 len)
4629 {
4630 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4631 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4632 sizeof(struct mgmt_ltk_info));
4633 u16 key_count, expected_len;
4634 int i, err;
4635
4636 BT_DBG("request for %s", hdev->name);
4637
4638 if (!lmp_le_capable(hdev))
4639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4640 MGMT_STATUS_NOT_SUPPORTED);
4641
4642 key_count = __le16_to_cpu(cp->key_count);
4643 if (key_count > max_key_count) {
4644 BT_ERR("load_ltks: too big key_count value %u", key_count);
4645 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4646 MGMT_STATUS_INVALID_PARAMS);
4647 }
4648
4649 expected_len = sizeof(*cp) + key_count *
4650 sizeof(struct mgmt_ltk_info);
4651 if (expected_len != len) {
4652 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4653 expected_len, len);
4654 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4655 MGMT_STATUS_INVALID_PARAMS);
4656 }
4657
4658 BT_DBG("%s key_count %u", hdev->name, key_count);
4659
4660 for (i = 0; i < key_count; i++) {
4661 struct mgmt_ltk_info *key = &cp->keys[i];
4662
4663 if (!ltk_is_valid(key))
4664 return mgmt_cmd_status(sk, hdev->id,
4665 MGMT_OP_LOAD_LONG_TERM_KEYS,
4666 MGMT_STATUS_INVALID_PARAMS);
4667 }
4668
4669 hci_dev_lock(hdev);
4670
4671 hci_smp_ltks_clear(hdev);
4672
4673 for (i = 0; i < key_count; i++) {
4674 struct mgmt_ltk_info *key = &cp->keys[i];
4675 u8 type, authenticated;
4676
4677 switch (key->type) {
4678 case MGMT_LTK_UNAUTHENTICATED:
4679 authenticated = 0x00;
4680 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4681 break;
4682 case MGMT_LTK_AUTHENTICATED:
4683 authenticated = 0x01;
4684 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
4685 break;
4686 case MGMT_LTK_P256_UNAUTH:
4687 authenticated = 0x00;
4688 type = SMP_LTK_P256;
4689 break;
4690 case MGMT_LTK_P256_AUTH:
4691 authenticated = 0x01;
4692 type = SMP_LTK_P256;
4693 break;
4694 case MGMT_LTK_P256_DEBUG:
4695 authenticated = 0x00;
4696 type = SMP_LTK_P256_DEBUG;
4697 default:
4698 continue;
4699 }
4700
4701 hci_add_ltk(hdev, &key->addr.bdaddr,
4702 le_addr_type(key->addr.type), type, authenticated,
4703 key->val, key->enc_size, key->ediv, key->rand);
4704 }
4705
4706 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4707 NULL, 0);
4708
4709 hci_dev_unlock(hdev);
4710
4711 return err;
4712 }
4713
/* Completion handler for a pending Get Connection Information command.
 *
 * Replies to the requesting socket with the connection's cached
 * RSSI/TX power values on success, or with the defined "invalid"
 * marker values on failure, then releases the connection hold and
 * reference taken when the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The stored command parameters begin with the peer address,
	 * so copying them back fills in rp.addr.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
4740
/* HCI request callback for the RSSI/TX-power refresh issued from
 * get_conn_info(). Locates the pending mgmt command that triggered
 * the request and completes it with the mapped status.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last one sent: nothing to complete */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was queued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4793
/* Handle the Get Connection Information management command.
 *
 * If the cached RSSI/TX-power values for the connection are fresh
 * enough they are returned immediately; otherwise an HCI request is
 * issued to refresh them and the reply is deferred to
 * conn_info_cmd_complete() via a pending mgmt command.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Error replies echo the requested address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Connection Information per connection at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the command completes;
		 * released again in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4914
/* Completion handler for a pending Get Clock Information command.
 *
 * Fills in the local clock and, when a connection was involved, the
 * piconet clock/accuracy, then replies to userspace and releases the
 * connection references taken in get_clock_info(). On error only the
 * zeroed address-bearing reply is sent.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Stored command parameters begin with the peer address */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is NULL when only the local clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() from get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
4950
4951 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4952 {
4953 struct hci_cp_read_clock *hci_cp;
4954 struct mgmt_pending_cmd *cmd;
4955 struct hci_conn *conn;
4956
4957 BT_DBG("%s status %u", hdev->name, status);
4958
4959 hci_dev_lock(hdev);
4960
4961 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
4962 if (!hci_cp)
4963 goto unlock;
4964
4965 if (hci_cp->which) {
4966 u16 handle = __le16_to_cpu(hci_cp->handle);
4967 conn = hci_conn_hash_lookup_handle(hdev, handle);
4968 } else {
4969 conn = NULL;
4970 }
4971
4972 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
4973 if (!cmd)
4974 goto unlock;
4975
4976 cmd->cmd_complete(cmd, mgmt_status(status));
4977 mgmt_pending_remove(cmd);
4978
4979 unlock:
4980 hci_dev_unlock(hdev);
4981 }
4982
/* Handle the Get Clock Information management command.
 *
 * Issues HCI Read Clock for the local clock and, when a peer address
 * is given, a second Read Clock for that connection's piconet clock.
 * The reply is deferred to clock_info_cmd_complete() via a pending
 * mgmt command. Only BR/EDR addresses are supported.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Error replies echo the requested address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00, handle ignored) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the command completes;
		 * released again in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5058
5059 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5060 {
5061 struct hci_conn *conn;
5062
5063 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5064 if (!conn)
5065 return false;
5066
5067 if (conn->dst_type != type)
5068 return false;
5069
5070 if (conn->state != BT_CONNECTED)
5071 return false;
5072
5073 return true;
5074 }
5075
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters for an address and set
 * its auto-connect policy, moving the entry onto the matching action
 * list (pend_le_conns / pend_le_reports) as needed.
 *
 * Returns 0 on success or -EIO if the parameter entry could not be
 * allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it is on before
	 * re-inserting it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5120
5121 static void device_added(struct sock *sk, struct hci_dev *hdev,
5122 bdaddr_t *bdaddr, u8 type, u8 action)
5123 {
5124 struct mgmt_ev_device_added ev;
5125
5126 bacpy(&ev.addr.bdaddr, bdaddr);
5127 ev.addr.type = type;
5128 ev.action = action;
5129
5130 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5131 }
5132
/* Handle the Add Device management command.
 *
 * BR/EDR addresses (action 0x01 only) are added to the whitelist; LE
 * addresses get connection parameters with an auto-connect policy
 * derived from the action value (0x00 report, 0x01 direct, 0x02
 * always). Only identity addresses are accepted for LE.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5220
5221 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5222 bdaddr_t *bdaddr, u8 type)
5223 {
5224 struct mgmt_ev_device_removed ev;
5225
5226 bacpy(&ev.addr.bdaddr, bdaddr);
5227 ev.addr.type = type;
5228
5229 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5230 }
5231
/* Handle the Remove Device management command.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or drops its connection parameters (LE). With BDADDR_ANY
 * and type 0x00, flushes the whole whitelist and every removable LE
 * connection-parameter entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal is only defined for type 0x00 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connection attempt in
			 * progress, but demote them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5360
/* Handle the Load Connection Parameters management command.
 *
 * Validates the variable-length parameter list, then replaces all
 * disabled connection parameter entries with the supplied ones.
 * Individually invalid entries are logged and skipped rather than
 * failing the whole load.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must match the advertised parameter count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5446
/* Handle the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on an unpowered controller that
 * declares the HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips
 * the controller between the configured and unconfigured state, the
 * index is re-announced on the appropriate interface.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag value actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured/unconfigured state changed, move the index
	 * between the unconfigured and the regular interface.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5502
/* Handle the Set Public Address management command.
 *
 * Stores a new public address for an unpowered controller whose
 * driver provides a set_bdaddr callback. When the address change
 * makes an unconfigured controller fully configured, the index is
 * moved to the regular interface and the controller is powered on
 * to apply the address.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only react further if the stored address actually changed */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5554
5555 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5556 u8 data_len)
5557 {
5558 eir[eir_len++] = sizeof(type) + data_len;
5559 eir[eir_len++] = type;
5560 memcpy(&eir[eir_len], data, data_len);
5561 eir_len += data_len;
5562
5563 return eir_len;
5564 }
5565
/* HCI request callback for the local OOB data read issued from
 * read_local_ssp_oob_req(). Builds an EIR-encoded reply with the
 * P-192 and/or P-256 hash/randomizer values, completes the pending
 * Read Local OOB Extended Data command and mirrors the result as a
 * Local OOB Data Updated event to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	/* Determine which hash/randomizer pairs are available; on any
	 * failure eir_len is forced to 0 and the pointers are unused.
	 */
	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 values are returned */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty reply carrying only the status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other sockets that opted in to OOB data events */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
5676
5677 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5678 struct mgmt_cp_read_local_oob_ext_data *cp)
5679 {
5680 struct mgmt_pending_cmd *cmd;
5681 struct hci_request req;
5682 int err;
5683
5684 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5685 cp, sizeof(*cp));
5686 if (!cmd)
5687 return -ENOMEM;
5688
5689 hci_req_init(&req, hdev);
5690
5691 if (bredr_sc_enabled(hdev))
5692 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5693 else
5694 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5695
5696 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
5697 if (err < 0) {
5698 mgmt_pending_remove(cmd);
5699 return err;
5700 }
5701
5702 return 0;
5703 }
5704
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds the local out-of-band pairing data for the requested address
 * type(s) as a sequence of EIR fields.  BR/EDR data with SSP enabled is
 * generated asynchronously via read_local_ssp_oob_req(); LE data is
 * assembled inline here.  On success the requesting socket is also
 * subscribed to OOB data events and an OOB_DATA_UPDATED event is sent
 * to the other subscribed sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: compute an upper bound for the EIR payload so the
	 * reply buffer can be sized before any data is generated.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* Class of Device field (2 + 3) */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* LE address + role + SC confirm + SC random
				 * + flags fields
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	/* On any precondition failure reply with an empty EIR payload */
	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: generate the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* The SSP hash/randomizer has to be read from the
			 * controller; the reply is then sent from the HCI
			 * completion path, so only clean up here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static random vs public address; the 7th byte
		 * encodes the address type (0x01 random, 0x00 public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* NOTE(review): role values 0x01/0x02 come from the LE
		 * Role AD type definition — confirm exact semantics
		 * against the Core Specification Supplement.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* Only include SC confirm/random values when Secure
		 * Connections is enabled (they were generated above).
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to future OOB data update events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify the other sockets subscribed to OOB data events */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
5860
5861 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5862 {
5863 u32 flags = 0;
5864
5865 flags |= MGMT_ADV_FLAG_CONNECTABLE;
5866 flags |= MGMT_ADV_FLAG_DISCOV;
5867 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5868 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5869
5870 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5871 flags |= MGMT_ADV_FLAG_TX_POWER;
5872
5873 return flags;
5874 }
5875
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Replies with the supported advertising flags, the data size limits
 * and the identifiers of all currently registered advertising
 * instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One extra byte per registered instance for the trailing
	 * instance identifier list.
	 */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill in the identifier list; its length matches
	 * adv_instance_cnt by construction (list updated under the same
	 * lock).
	 */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
5924
5925 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
5926 u8 len, bool is_adv_data)
5927 {
5928 u8 max_len = HCI_MAX_AD_LENGTH;
5929 int i, cur_len;
5930 bool flags_managed = false;
5931 bool tx_power_managed = false;
5932
5933 if (is_adv_data) {
5934 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
5935 MGMT_ADV_FLAG_LIMITED_DISCOV |
5936 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
5937 flags_managed = true;
5938 max_len -= 3;
5939 }
5940
5941 if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
5942 tx_power_managed = true;
5943 max_len -= 3;
5944 }
5945 }
5946
5947 if (len > max_len)
5948 return false;
5949
5950 /* Make sure that the data is correctly formatted. */
5951 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
5952 cur_len = data[i];
5953
5954 if (flags_managed && data[i + 1] == EIR_FLAGS)
5955 return false;
5956
5957 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
5958 return false;
5959
5960 /* If the current field length would exceed the total data
5961 * length, then it's invalid.
5962 */
5963 if (i + cur_len >= len)
5964 return false;
5965 }
5966
5967 return true;
5968 }
5969
/* Completion callback for the HCI request issued by add_advertising().
 *
 * On error every still-pending instance is rolled back (removed and an
 * Advertising Removed event emitted); on success pending instances are
 * marked as committed.  The pending mgmt command, if still present, is
 * then answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the expiry timer if the failed instance is the
		 * one currently being advertised.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6021
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Registers (or replaces) an advertising instance and, when possible,
 * schedules it for advertising right away.  If HCI commands need to be
 * sent the reply is deferred to add_advertising_complete(); otherwise
 * the command is completed inline.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable length payload must exactly match the declared
	 * adv_data/scan_rsp lengths.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the controller is powered */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while other advertising/LE state changes are in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6162
6163 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6164 u16 opcode)
6165 {
6166 struct mgmt_pending_cmd *cmd;
6167 struct mgmt_cp_remove_advertising *cp;
6168 struct mgmt_rp_remove_advertising rp;
6169
6170 BT_DBG("status %d", status);
6171
6172 hci_dev_lock(hdev);
6173
6174 /* A failure status here only means that we failed to disable
6175 * advertising. Otherwise, the advertising instance has been removed,
6176 * so report success.
6177 */
6178 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6179 if (!cmd)
6180 goto unlock;
6181
6182 cp = cmd->param;
6183 rp.instance = cp->instance;
6184
6185 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6186 &rp, sizeof(rp));
6187 mgmt_pending_remove(cmd);
6188
6189 unlock:
6190 hci_dev_unlock(hdev);
6191 }
6192
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes an advertising instance (instance 0 is accepted here and is
 * passed through to hci_req_clear_adv_instance() — presumably meaning
 * "all instances"; confirm against the mgmt API spec) and disables
 * advertising when no instance remains.  The reply is deferred to
 * remove_advertising_complete() when HCI commands have to be sent.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance number must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while other advertising/LE state changes are in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6264
6265 static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6266 {
6267 u8 max_len = HCI_MAX_AD_LENGTH;
6268
6269 if (is_adv_data) {
6270 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6271 MGMT_ADV_FLAG_LIMITED_DISCOV |
6272 MGMT_ADV_FLAG_MANAGED_FLAGS))
6273 max_len -= 3;
6274
6275 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6276 max_len -= 3;
6277 }
6278
6279 return max_len;
6280 }
6281
6282 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6283 void *data, u16 data_len)
6284 {
6285 struct mgmt_cp_get_adv_size_info *cp = data;
6286 struct mgmt_rp_get_adv_size_info rp;
6287 u32 flags, supported_flags;
6288 int err;
6289
6290 BT_DBG("%s", hdev->name);
6291
6292 if (!lmp_le_capable(hdev))
6293 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6294 MGMT_STATUS_REJECTED);
6295
6296 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6298 MGMT_STATUS_INVALID_PARAMS);
6299
6300 flags = __le32_to_cpu(cp->flags);
6301
6302 /* The current implementation only supports a subset of the specified
6303 * flags.
6304 */
6305 supported_flags = get_supported_adv_flags(hdev);
6306 if (flags & ~supported_flags)
6307 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6308 MGMT_STATUS_INVALID_PARAMS);
6309
6310 rp.instance = cp->instance;
6311 rp.flags = cp->flags;
6312 rp.max_adv_data_len = tlv_data_max_len(flags, true);
6313 rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
6314
6315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6316 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6317
6318 return err;
6319 }
6320
/* Table of mgmt command handlers.  The array index corresponds to the
 * mgmt opcode, so entries must stay in opcode order and index 0 is a
 * placeholder.  Size and flag columns declare the fixed parameter size
 * (with HCI_MGMT_VAR_LEN for commands carrying variable-length data)
 * and the trust/index requirements of each command.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
};
6413
/* Notify mgmt sockets that a new controller index has appeared.
 * The legacy (Unconfigured) Index Added event is sent first, followed
 * by an Extended Index Added event carrying controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6445
/* Notify mgmt sockets that a controller index disappeared.  For
 * primary controllers all still-pending mgmt commands are failed with
 * INVALID_INDEX first; then the legacy and extended removal events are
 * emitted, mirroring mgmt_index_added().
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6480
6481 /* This function requires the caller holds hdev->lock */
6482 static void restart_le_actions(struct hci_dev *hdev)
6483 {
6484 struct hci_conn_params *p;
6485
6486 list_for_each_entry(p, &hdev->le_conn_params, list) {
6487 /* Needed for AUTO_OFF case where might not "really"
6488 * have been powered off.
6489 */
6490 list_del_init(&p->action);
6491
6492 switch (p->auto_connect) {
6493 case HCI_AUTO_CONN_DIRECT:
6494 case HCI_AUTO_CONN_ALWAYS:
6495 list_add(&p->action, &hdev->pend_le_conns);
6496 break;
6497 case HCI_AUTO_CONN_REPORT:
6498 list_add(&p->action, &hdev->pend_le_reports);
6499 break;
6500 default:
6501 break;
6502 }
6503 }
6504 }
6505
6506 void mgmt_power_on(struct hci_dev *hdev, int err)
6507 {
6508 struct cmd_lookup match = { NULL, hdev };
6509
6510 BT_DBG("err %d", err);
6511
6512 hci_dev_lock(hdev);
6513
6514 if (!err) {
6515 restart_le_actions(hdev);
6516 hci_update_background_scan(hdev);
6517 }
6518
6519 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6520
6521 new_settings(hdev, match.sk);
6522
6523 if (match.sk)
6524 sock_put(match.sk);
6525
6526 hci_dev_unlock(hdev);
6527 }
6528
/* Finish mgmt-level cleanup after the controller has been powered off:
 * answer pending Set Powered commands, fail all other pending
 * commands, broadcast a zeroed Class of Device when needed, and emit
 * New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Broadcast the cleared Class of Device if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6562
6563 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6564 {
6565 struct mgmt_pending_cmd *cmd;
6566 u8 status;
6567
6568 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6569 if (!cmd)
6570 return;
6571
6572 if (err == -ERFKILL)
6573 status = MGMT_STATUS_RFKILLED;
6574 else
6575 status = MGMT_STATUS_FAILED;
6576
6577 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6578
6579 mgmt_pending_remove(cmd);
6580 }
6581
6582 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6583 bool persistent)
6584 {
6585 struct mgmt_ev_new_link_key ev;
6586
6587 memset(&ev, 0, sizeof(ev));
6588
6589 ev.store_hint = persistent;
6590 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6591 ev.key.addr.type = BDADDR_BREDR;
6592 ev.key.type = key->type;
6593 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6594 ev.key.pin_len = key->pin_len;
6595
6596 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6597 }
6598
6599 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6600 {
6601 switch (ltk->type) {
6602 case SMP_LTK:
6603 case SMP_LTK_SLAVE:
6604 if (ltk->authenticated)
6605 return MGMT_LTK_AUTHENTICATED;
6606 return MGMT_LTK_UNAUTHENTICATED;
6607 case SMP_LTK_P256:
6608 if (ltk->authenticated)
6609 return MGMT_LTK_P256_AUTH;
6610 return MGMT_LTK_P256_UNAUTH;
6611 case SMP_LTK_P256_DEBUG:
6612 return MGMT_LTK_P256_DEBUG;
6613 }
6614
6615 return MGMT_LTK_UNAUTHENTICATED;
6616 }
6617
/* Emit a New Long Term Key event so userspace can persist the key.
 * The store hint is suppressed for non-identity random addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is flagged as the master key -- NOTE(review): the
	 * exact role mapping of SMP_LTK vs SMP_LTK_SLAVE is defined in
	 * smp.h; confirm there.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6660
6661 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6662 {
6663 struct mgmt_ev_new_irk ev;
6664
6665 memset(&ev, 0, sizeof(ev));
6666
6667 ev.store_hint = persistent;
6668
6669 bacpy(&ev.rpa, &irk->rpa);
6670 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6671 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6672 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6673
6674 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6675 }
6676
/* Emit a New CSRK event so userspace can persist the signature
 * resolving key.  The store hint is suppressed for non-identity
 * random addresses, mirroring mgmt_new_ltk().
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
6706
6707 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6708 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6709 u16 max_interval, u16 latency, u16 timeout)
6710 {
6711 struct mgmt_ev_new_conn_param ev;
6712
6713 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6714 return;
6715
6716 memset(&ev, 0, sizeof(ev));
6717 bacpy(&ev.addr.bdaddr, bdaddr);
6718 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6719 ev.store_hint = store_hint;
6720 ev.min_interval = cpu_to_le16(min_interval);
6721 ev.max_interval = cpu_to_le16(max_interval);
6722 ev.latency = cpu_to_le16(latency);
6723 ev.timeout = cpu_to_le16(timeout);
6724
6725 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6726 }
6727
/* Emit a Device Connected event, including whatever EIR data is known
 * about the remote device: its LE advertising data, or for BR/EDR the
 * remote name and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus appended EIR data -- NOTE(review): assumes
	 * le_adv_data_len and name_len are bounded by the HCI layer so
	 * 512 bytes is always sufficient; confirm against hci_conn.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6764
6765 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6766 {
6767 struct sock **sk = data;
6768
6769 cmd->cmd_complete(cmd, 0);
6770
6771 *sk = cmd->sk;
6772 sock_hold(*sk);
6773
6774 mgmt_pending_remove(cmd);
6775 }
6776
6777 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6778 {
6779 struct hci_dev *hdev = data;
6780 struct mgmt_cp_unpair_device *cp = cmd->param;
6781
6782 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6783
6784 cmd->cmd_complete(cmd, 0);
6785 mgmt_pending_remove(cmd);
6786 }
6787
6788 bool mgmt_powering_down(struct hci_dev *hdev)
6789 {
6790 struct mgmt_pending_cmd *cmd;
6791 struct mgmt_mode *cp;
6792
6793 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6794 if (!cmd)
6795 return false;
6796
6797 cp = cmd->param;
6798 if (!cp->val)
6799 return true;
6800
6801 return false;
6802 }
6803
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands related to this disconnection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only emit the event if mgmt saw the connection as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete a pending Disconnect command and capture its socket,
	 * which is then passed to mgmt_event() -- NOTE(review):
	 * presumably as the skip_sk so the initiator does not also get
	 * the event; confirm against mgmt_event().
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6839
6840 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6841 u8 link_type, u8 addr_type, u8 status)
6842 {
6843 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6844 struct mgmt_cp_disconnect *cp;
6845 struct mgmt_pending_cmd *cmd;
6846
6847 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6848 hdev);
6849
6850 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
6851 if (!cmd)
6852 return;
6853
6854 cp = cmd->param;
6855
6856 if (bacmp(bdaddr, &cp->addr.bdaddr))
6857 return;
6858
6859 if (cp->addr.type != bdaddr_type)
6860 return;
6861
6862 cmd->cmd_complete(cmd, mgmt_status(status));
6863 mgmt_pending_remove(cmd);
6864 }
6865
6866 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6867 u8 addr_type, u8 status)
6868 {
6869 struct mgmt_ev_connect_failed ev;
6870
6871 /* The connection is still in hci_conn_hash so test for 1
6872 * instead of 0 to know if this is the last one.
6873 */
6874 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6875 cancel_delayed_work(&hdev->power_off);
6876 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6877 }
6878
6879 bacpy(&ev.addr.bdaddr, bdaddr);
6880 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6881 ev.status = mgmt_status(status);
6882
6883 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6884 }
6885
6886 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6887 {
6888 struct mgmt_ev_pin_code_request ev;
6889
6890 bacpy(&ev.addr.bdaddr, bdaddr);
6891 ev.addr.type = BDADDR_BREDR;
6892 ev.secure = secure;
6893
6894 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6895 }
6896
6897 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6898 u8 status)
6899 {
6900 struct mgmt_pending_cmd *cmd;
6901
6902 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6903 if (!cmd)
6904 return;
6905
6906 cmd->cmd_complete(cmd, mgmt_status(status));
6907 mgmt_pending_remove(cmd);
6908 }
6909
6910 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6911 u8 status)
6912 {
6913 struct mgmt_pending_cmd *cmd;
6914
6915 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6916 if (!cmd)
6917 return;
6918
6919 cmd->cmd_complete(cmd, mgmt_status(status));
6920 mgmt_pending_remove(cmd);
6921 }
6922
6923 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6924 u8 link_type, u8 addr_type, u32 value,
6925 u8 confirm_hint)
6926 {
6927 struct mgmt_ev_user_confirm_request ev;
6928
6929 BT_DBG("%s", hdev->name);
6930
6931 bacpy(&ev.addr.bdaddr, bdaddr);
6932 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6933 ev.confirm_hint = confirm_hint;
6934 ev.value = cpu_to_le32(value);
6935
6936 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6937 NULL);
6938 }
6939
6940 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6941 u8 link_type, u8 addr_type)
6942 {
6943 struct mgmt_ev_user_passkey_request ev;
6944
6945 BT_DBG("%s", hdev->name);
6946
6947 bacpy(&ev.addr.bdaddr, bdaddr);
6948 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6949
6950 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6951 NULL);
6952 }
6953
6954 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6955 u8 link_type, u8 addr_type, u8 status,
6956 u8 opcode)
6957 {
6958 struct mgmt_pending_cmd *cmd;
6959
6960 cmd = pending_find(opcode, hdev);
6961 if (!cmd)
6962 return -ENOENT;
6963
6964 cmd->cmd_complete(cmd, mgmt_status(status));
6965 mgmt_pending_remove(cmd);
6966
6967 return 0;
6968 }
6969
/* Complete a pending USER_CONFIRM_REPLY command with the given status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6976
/* Complete a pending USER_CONFIRM_NEG_REPLY command with the given status. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6984
/* Complete a pending USER_PASSKEY_REPLY command with the given status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6991
/* Complete a pending USER_PASSKEY_NEG_REPLY command with the given status. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6999
7000 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7001 u8 link_type, u8 addr_type, u32 passkey,
7002 u8 entered)
7003 {
7004 struct mgmt_ev_passkey_notify ev;
7005
7006 BT_DBG("%s", hdev->name);
7007
7008 bacpy(&ev.addr.bdaddr, bdaddr);
7009 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7010 ev.passkey = __cpu_to_le32(passkey);
7011 ev.entered = entered;
7012
7013 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7014 }
7015
/* Forward an HCI authentication failure to mgmt: emit Auth Failed and,
 * if a pairing command is pending for this connection, complete it with
 * the translated status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* If a pairing originator exists, its socket is passed as the one
	 * to skip: it learns about the failure via the command response
	 * below, not via the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7036
/* Handle completion of an authentication-enable change on the
 * controller: sync the HCI_LINK_SECURITY setting flag with the HCI_AUTH
 * state and respond to any pending SET_LINK_SECURITY commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Controller rejected the change: fail all pending
		 * SET_LINK_SECURITY commands with the translated status.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror HCI_AUTH into the mgmt setting flag; "changed" records
	 * whether the visible setting actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* The foreach handler left a held socket reference in match.sk. */
	if (match.sk)
		sock_put(match.sk);
}
7063
7064 static void clear_eir(struct hci_request *req)
7065 {
7066 struct hci_dev *hdev = req->hdev;
7067 struct hci_cp_write_eir cp;
7068
7069 if (!lmp_ext_inq_capable(hdev))
7070 return;
7071
7072 memset(hdev->eir, 0, sizeof(hdev->eir));
7073
7074 memset(&cp, 0, sizeof(cp));
7075
7076 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7077 }
7078
/* Handle completion of a Simple Pairing mode change on the controller:
 * sync the HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, respond
 * to pending SET_SSP commands and refresh or clear the EIR data to match
 * the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* The controller rejected the change; if we were trying to
		 * enable SSP, roll the flags back and announce the settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; "changed" must be
		 * true if either flag was previously set.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* The foreach handler left a held socket reference in match.sk. */
	if (match.sk)
		sock_put(match.sk);

	/* Update the EIR data: SSP on means (re)build it (plus debug mode
	 * if debug keys are in use), SSP off means clear it.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7131
7132 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7133 {
7134 struct cmd_lookup *match = data;
7135
7136 if (match->sk == NULL) {
7137 match->sk = cmd->sk;
7138 sock_hold(match->sk);
7139 }
7140 }
7141
/* Handle completion of a class-of-device update: record the socket that
 * initiated it (via SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) and, on
 * success, broadcast the new class to mgmt listeners.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup records at most one originating socket (with a held
	 * reference) in match.sk; it does not complete the commands.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* dev_class is the 3-byte class of device value. */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7160
/* Handle completion of a local-name change on the controller and fan
 * the new name (plus the configured short name) out to mgmt listeners.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending: the change was driven by the HCI
		 * core itself, so keep the cached name in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7188
7189 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7190 {
7191 int i;
7192
7193 for (i = 0; i < uuid_count; i++) {
7194 if (!memcmp(uuid, uuids[i], 16))
7195 return true;
7196 }
7197
7198 return false;
7199 }
7200
/* Walk an EIR / advertising data blob and return true if any UUID field
 * (16-, 32- or 128-bit, complete or partial list) contains one of the
 * uuids in the filter list. Shorter UUIDs are expanded to 128-bit form
 * over the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data early. */
		if (field_len == 0)
			break;

		/* Stop on a field that claims to run past the buffer. */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[0] is the length byte, eir[1] the field type; the
		 * field data occupies eir[2]..eir[field_len].
		 */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* little-endian 16-bit UUID -> bytes 12-13 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* little-endian 32-bit UUID -> bytes 12-15 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus field_len data bytes. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7255
/* Schedule an LE scan restart so that devices suppressed by the
 * controller's strict duplicate filter get reported again with fresh
 * RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would land beyond the end of the
	 * current scan window (scan_start + scan_duration in jiffies).
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7270
7271 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7272 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7273 {
7274 /* If a RSSI threshold has been specified, and
7275 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7276 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7277 * is set, let it through for further processing, as we might need to
7278 * restart the scan.
7279 *
7280 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7281 * the results are also dropped.
7282 */
7283 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7284 (rssi == HCI_RSSI_INVALID ||
7285 (rssi < hdev->discovery.rssi &&
7286 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7287 return false;
7288
7289 if (hdev->discovery.uuid_count != 0) {
7290 /* If a list of UUIDs is provided in filter, results with no
7291 * matching UUID should be dropped.
7292 */
7293 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7294 hdev->discovery.uuids) &&
7295 !eir_has_uuids(scan_rsp, scan_rsp_len,
7296 hdev->discovery.uuid_count,
7297 hdev->discovery.uuids))
7298 return false;
7299 }
7300
7301 /* If duplicate filtering does not report RSSI changes, then restart
7302 * scanning to ensure updated result with updated RSSI values.
7303 */
7304 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7305 restart_le_scan(hdev);
7306
7307 /* Validate RSSI value against the RSSI threshold once more. */
7308 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7309 rssi < hdev->discovery.rssi)
7310 return false;
7311 }
7312
7313 return true;
7314 }
7315
/* Build and emit a Device Found mgmt event from an inquiry result or LE
 * advertising report, applying the active discovery filters first. The
 * event carries the EIR/advertising data, an optional appended class of
 * device field, and any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the 3-byte class of device when provided and not already
	 * present as a field inside the EIR data.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7397
7398 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7399 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7400 {
7401 struct mgmt_ev_device_found *ev;
7402 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7403 u16 eir_len;
7404
7405 ev = (struct mgmt_ev_device_found *) buf;
7406
7407 memset(buf, 0, sizeof(buf));
7408
7409 bacpy(&ev->addr.bdaddr, bdaddr);
7410 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7411 ev->rssi = rssi;
7412
7413 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7414 name_len);
7415
7416 ev->eir_len = cpu_to_le16(eir_len);
7417
7418 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7419 }
7420
7421 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7422 {
7423 struct mgmt_ev_discovering ev;
7424
7425 BT_DBG("%s discovering %u", hdev->name, discovering);
7426
7427 memset(&ev, 0, sizeof(ev));
7428 ev.type = hdev->discovery.type;
7429 ev.discovering = discovering;
7430
7431 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7432 }
7433
/* Registration record for the mgmt control channel; every command in
 * mgmt_handlers is dispatched through this.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
};
7440
/* Register the mgmt control channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7445
/* Unregister the mgmt control channel on teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}