/*
 * Source: net/bluetooth/mgmt.c
 * Mirror: mirror_ubuntu-bionic-kernel.git (git.proxmox.com)
 * At commit: "Bluetooth: Add instance range check for Add Advertising command"
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 10
42
/* Management opcodes available to trusted (privileged) sockets.  The
 * array order is the order in which the opcodes are reported back by
 * the Read Commands handler (read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
106
/* Management events delivered to trusted (privileged) sockets; reported
 * alongside mgmt_commands[] in the Read Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
143
/* Subset of management opcodes permitted for untrusted (unprivileged)
 * sockets: read-only information queries, no state changes.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};
151
/* Subset of management events delivered to untrusted (unprivileged)
 * sockets.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
164
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
166
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
169
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 MGMT_STATUS_SUCCESS,
173 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
175 MGMT_STATUS_FAILED, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
180 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED, /* Rejected Security */
187 MGMT_STATUS_REJECTED, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
195 MGMT_STATUS_BUSY, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY, /* Role Switch Pending */
221 MGMT_STATUS_FAILED, /* Slot Violation */
222 MGMT_STATUS_FAILED, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
233 };
234
235 static u8 mgmt_status(u8 hci_status)
236 {
237 if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 return mgmt_status_table[hci_status];
239
240 return MGMT_STATUS_FAILED;
241 }
242
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
244 u16 len, int flag)
245 {
246 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
247 flag, NULL);
248 }
249
/* Send an event on the control channel restricted by socket flag,
 * optionally skipping one socket (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
256
/* Send a generic event on the control channel to sockets that have
 * generic events enabled (HCI_MGMT_GENERIC_EVENTS), minus skip_sk.
 */
static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
}
263
/* Send an event on the control channel to trusted sockets only
 * (HCI_SOCK_TRUSTED), minus skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270
271 static u8 le_addr_type(u8 mgmt_addr_type)
272 {
273 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 return ADDR_LE_DEV_PUBLIC;
275 else
276 return ADDR_LE_DEV_RANDOM;
277 }
278
/* Handler for MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision pair (MGMT_VERSION/MGMT_REVISION).  Controller-less
 * command, so the index is MGMT_INDEX_NONE.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	rp.version = MGMT_VERSION;
	rp.revision = cpu_to_le16(MGMT_REVISION);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
292
293 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
294 u16 data_len)
295 {
296 struct mgmt_rp_read_commands *rp;
297 u16 num_commands, num_events;
298 size_t rp_size;
299 int i, err;
300
301 BT_DBG("sock %p", sk);
302
303 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
304 num_commands = ARRAY_SIZE(mgmt_commands);
305 num_events = ARRAY_SIZE(mgmt_events);
306 } else {
307 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
308 num_events = ARRAY_SIZE(mgmt_untrusted_events);
309 }
310
311 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
312
313 rp = kmalloc(rp_size, GFP_KERNEL);
314 if (!rp)
315 return -ENOMEM;
316
317 rp->num_commands = cpu_to_le16(num_commands);
318 rp->num_events = cpu_to_le16(num_events);
319
320 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
321 __le16 *opcode = rp->opcodes;
322
323 for (i = 0; i < num_commands; i++, opcode++)
324 put_unaligned_le16(mgmt_commands[i], opcode);
325
326 for (i = 0; i < num_events; i++, opcode++)
327 put_unaligned_le16(mgmt_events[i], opcode);
328 } else {
329 __le16 *opcode = rp->opcodes;
330
331 for (i = 0; i < num_commands; i++, opcode++)
332 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
333
334 for (i = 0; i < num_events; i++, opcode++)
335 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
336 }
337
338 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
339 rp, rp_size);
340 kfree(rp);
341
342 return err;
343 }
344
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indices of all
 * configured BR/EDR controllers.
 *
 * The device list is walked twice under hci_dev_list_lock: once to get
 * an upper bound for the allocation (GFP_ATOMIC because the read lock
 * is held) and once to fill in the indices.  The second pass applies
 * additional filters (SETUP/CONFIG/USER_CHANNEL/raw-only), so the reply
 * length is recomputed from the final count before sending.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported controllers. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices, skipping devices that are still
	 * being set up, reconfigured or owned by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute with the (possibly smaller) final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
404
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: same two-pass scheme as
 * read_index_list(), but reports only BR/EDR controllers that still
 * have the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices with the extra filters applied. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute with the (possibly smaller) final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
464
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all BR/EDR and
 * AMP controllers, each entry carrying a type (0x00 configured BR/EDR,
 * 0x01 unconfigured BR/EDR, 0x02 AMP), the bus and the index.
 *
 * As a side effect the calling socket is switched over to extended
 * index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation (GFP_ATOMIC since
	 * the read lock is held).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries with the extra filters applied. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute with the (possibly smaller) final count. */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
540
541 static bool is_configured(struct hci_dev *hdev)
542 {
543 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
544 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
545 return false;
546
547 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
548 !bacmp(&hdev->public_addr, BDADDR_ANY))
549 return false;
550
551 return true;
552 }
553
554 static __le32 get_missing_options(struct hci_dev *hdev)
555 {
556 u32 options = 0;
557
558 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
559 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
560 options |= MGMT_OPTION_EXTERNAL_CONFIG;
561
562 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
563 !bacmp(&hdev->public_addr, BDADDR_ANY))
564 options |= MGMT_OPTION_PUBLIC_ADDRESS;
565
566 return cpu_to_le32(options);
567 }
568
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), skip);
}
576
/* Complete a configuration command by replying with the currently
 * missing options for the controller.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
584
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus the supported and currently missing configuration options.
 * External config is supported when the quirk is set; public address
 * configuration when the driver provides a set_bdaddr callback.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
612
/* Build the bitmask of settings this controller can support, derived
 * from its LMP feature bits (BR/EDR, SSP, SC, LE) and configuration
 * capabilities.  The basic settings are always available.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is supported with either external config or a
	 * driver-provided way to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
652
/* Build the bitmask of settings that are currently active on this
 * controller, mostly by translating hdev flags one-to-one into
 * MGMT_SETTING_* bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
720
721 #define PNP_INFO_SVCLASS_ID 0x1200
722
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * 'data' (at most 'len' bytes available) and return the advanced write
 * pointer.  Generic 0x11xx-and-below UUIDs and the PnP Information
 * service are excluded.  If not every UUID fits, the field type is
 * downgraded from "complete" to "some" (EIR_UUID16_SOME).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at bytes 12-13 of the 128-bit form. */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match;
		 * uuids_start[0] is the running length byte.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
764
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * 'data' (at most 'len' bytes) and return the advanced write pointer.
 * Same lazy-header / "some" downgrade scheme as create_uuid16_list().
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives at bytes 12-15 of the 128-bit form. */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
797
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * 'data' (at most 'len' bytes) and return the advanced write pointer.
 * Same lazy-header / "some" downgrade scheme as create_uuid16_list().
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
830
/* Look up a pending mgmt command by opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
835
/* Look up a pending mgmt command by opcode and user data pointer on the
 * control channel.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
842
/* Return the advertising instance whose data should currently be used,
 * or 0x00 for the global (non-instance) advertising settings.
 */
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}
856
857 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
858 {
859 u8 ad_len = 0;
860 size_t name_len;
861
862 name_len = strlen(hdev->dev_name);
863 if (name_len > 0) {
864 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
865
866 if (name_len > max_len) {
867 name_len = max_len;
868 ptr[1] = EIR_NAME_SHORT;
869 } else
870 ptr[1] = EIR_NAME_COMPLETE;
871
872 ptr[0] = name_len + 1;
873
874 memcpy(ptr + 2, hdev->dev_name, name_len);
875
876 ad_len += (name_len + 2);
877 ptr += (name_len + 2);
878 }
879
880 return ad_len;
881 }
882
/* Copy the scan response data of the given advertising instance into
 * 'ptr' and return its length; returns 0 for an unknown instance.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}
900
/* Queue an HCI LE Set Scan Response Data command for the given instance
 * (0 = default data built from the device name).  A no-op when LE is
 * disabled or the data is unchanged; the new data is cached in hdev.
 */
static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if the data hasn't changed. */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Cache the full (zero-padded) buffer for future comparisons. */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
928
/* Update the scan response data for the currently active instance. */
static void update_scan_rsp_data(struct hci_request *req)
{
	update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev));
}
933
/* Return the LE advertising "Flags" discoverability bits (general or
 * limited) that reflect the controller's effective discoverable state.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited. */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
957
958 static bool get_connectable(struct hci_dev *hdev)
959 {
960 struct mgmt_pending_cmd *cmd;
961
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
964 */
965 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 if (cmd) {
967 struct mgmt_mode *cp = cmd->param;
968
969 return cp->val;
970 }
971
972 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
973 }
974
/* Return the advertising flags for the given instance.  Instance 0
 * derives its flags from the global controller state; other instances
 * carry their own flags set via Add Advertising.
 */
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}
1003
/* Return the scan response data length of the currently active
 * advertising instance, or 0 for instance 0 / unknown instances.
 */
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}
1022
/* Build the advertising data for the given instance into 'ptr': an
 * optional "Flags" field, the instance's own advertising data, and an
 * optional "Tx Power" field.  Returns the total length written, or 0
 * for an invalid non-zero instance.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Instance 0 carries no instance data of its own. */
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
1090
/* Queue an HCI LE Set Advertising Data command for the given instance.
 * A no-op when LE is disabled or the data is unchanged; the new data is
 * cached in hdev for the next comparison.
 */
static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons. */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1116
1117 static void update_adv_data(struct hci_request *req)
1118 {
1119 update_inst_adv_data(req, get_current_adv_instance(req->hdev));
1120 }
1121
/* Build and run a standalone HCI request that refreshes the advertising
 * data for the currently active instance. Returns the hci_req_run()
 * result (-ENODATA if no command ended up being queued).
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
1131
/* Assemble the Extended Inquiry Response payload into @data: local name,
 * inquiry Tx power, Device ID record and the 16/32/128-bit service UUID
 * lists. @data is assumed to hold HCI_MAX_EIR_LENGTH bytes.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			/* Truncate and flag the name as shortened */
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte + name bytes) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID record: source, vendor, product, version,
		 * each as little-endian 16-bit values.
		 */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill whatever space remains with the registered UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1179
1180 static void update_eir(struct hci_request *req)
1181 {
1182 struct hci_dev *hdev = req->hdev;
1183 struct hci_cp_write_eir cp;
1184
1185 if (!hdev_is_powered(hdev))
1186 return;
1187
1188 if (!lmp_ext_inq_capable(hdev))
1189 return;
1190
1191 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1192 return;
1193
1194 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1195 return;
1196
1197 memset(&cp, 0, sizeof(cp));
1198
1199 create_eir(hdev, cp.data);
1200
1201 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1202 return;
1203
1204 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1205
1206 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1207 }
1208
1209 static u8 get_service_classes(struct hci_dev *hdev)
1210 {
1211 struct bt_uuid *uuid;
1212 u8 val = 0;
1213
1214 list_for_each_entry(uuid, &hdev->uuids, list)
1215 val |= uuid->svc_hint;
1216
1217 return val;
1218 }
1219
1220 static void update_class(struct hci_request *req)
1221 {
1222 struct hci_dev *hdev = req->hdev;
1223 u8 cod[3];
1224
1225 BT_DBG("%s", hdev->name);
1226
1227 if (!hdev_is_powered(hdev))
1228 return;
1229
1230 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1231 return;
1232
1233 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1234 return;
1235
1236 cod[0] = hdev->minor_class;
1237 cod[1] = hdev->major_class;
1238 cod[2] = get_service_classes(hdev);
1239
1240 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1241 cod[1] |= 0x20;
1242
1243 if (memcmp(cod, hdev->dev_class, 3) == 0)
1244 return;
1245
1246 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1247 }
1248
/* Queue the HCI command that turns LE advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1255
/* Queue the commands that (re)enable LE advertising: set the advertising
 * parameters for the current instance and then turn advertising on. Any
 * currently running advertising is disabled first so the parameters can
 * be reprogrammed. No-op while an LE connection exists.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Pick ADV_SCAN_IND over ADV_NONCONN_IND when scan response
	 * data exists for the current instance.
	 */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1312
/* Delayed-work handler that ends the service-cache period: once the
 * cache flag is cleared, push the (possibly stale) EIR data and class
 * of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Test-and-clear makes this a one-shot transition */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1333
/* Delayed-work handler that fires when the resolvable private address
 * lifetime ends: mark the RPA expired and, if advertising, restart it
 * so a fresh address gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1354
1355 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1356 {
1357 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1358 return;
1359
1360 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1361 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1362
1363 /* Non-mgmt controlled devices get this bit set
1364 * implicitly so that pairing works for them, however
1365 * for mgmt we require user-space to explicitly enable
1366 * it
1367 */
1368 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1369 }
1370
/* Read Controller Information command handler: reply with the address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and the device names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1400
1401 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1402 {
1403 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1404
1405 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1406 sizeof(settings));
1407 }
1408
1409 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1410 {
1411 BT_DBG("%s status 0x%02x", hdev->name, status);
1412
1413 if (hci_conn_count(hdev) == 0) {
1414 cancel_delayed_work(&hdev->power_off);
1415 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1416 }
1417 }
1418
1419 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1420 u8 instance)
1421 {
1422 struct mgmt_ev_advertising_added ev;
1423
1424 ev.instance = instance;
1425
1426 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1427 }
1428
1429 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1430 u8 instance)
1431 {
1432 struct mgmt_ev_advertising_removed ev;
1433
1434 ev.instance = instance;
1435
1436 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1437 }
1438
1439 static int schedule_adv_instance(struct hci_request *req, u8 instance,
1440 bool force) {
1441 struct hci_dev *hdev = req->hdev;
1442 struct adv_info *adv_instance = NULL;
1443 u16 timeout;
1444
1445 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1446 !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
1447 return -EPERM;
1448
1449 if (hdev->adv_instance_timeout)
1450 return -EBUSY;
1451
1452 adv_instance = hci_find_adv_instance(hdev, instance);
1453 if (!adv_instance)
1454 return -ENOENT;
1455
1456 /* A zero timeout means unlimited advertising. As long as there is
1457 * only one instance, duration should be ignored. We still set a timeout
1458 * in case further instances are being added later on.
1459 *
1460 * If the remaining lifetime of the instance is more than the duration
1461 * then the timeout corresponds to the duration, otherwise it will be
1462 * reduced to the remaining instance lifetime.
1463 */
1464 if (adv_instance->timeout == 0 ||
1465 adv_instance->duration <= adv_instance->remaining_time)
1466 timeout = adv_instance->duration;
1467 else
1468 timeout = adv_instance->remaining_time;
1469
1470 /* The remaining time is being reduced unless the instance is being
1471 * advertised without time limit.
1472 */
1473 if (adv_instance->timeout)
1474 adv_instance->remaining_time =
1475 adv_instance->remaining_time - timeout;
1476
1477 hdev->adv_instance_timeout = timeout;
1478 queue_delayed_work(hdev->workqueue,
1479 &hdev->adv_instance_expire,
1480 msecs_to_jiffies(timeout * 1000));
1481
1482 /* If we're just re-scheduling the same instance again then do not
1483 * execute any HCI commands. This happens when a single instance is
1484 * being advertised.
1485 */
1486 if (!force && hdev->cur_adv_instance == instance &&
1487 hci_dev_test_flag(hdev, HCI_LE_ADV))
1488 return 0;
1489
1490 hdev->cur_adv_instance = instance;
1491 update_adv_data(req);
1492 update_scan_rsp_data(req);
1493 enable_advertising(req);
1494
1495 return 0;
1496 }
1497
1498 static void cancel_adv_timeout(struct hci_dev *hdev)
1499 {
1500 if (hdev->adv_instance_timeout) {
1501 hdev->adv_instance_timeout = 0;
1502 cancel_delayed_work(&hdev->adv_instance_expire);
1503 }
1504 }
1505
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
static void clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
			       u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		/* Remove all (force) or only timed instances, emitting
		 * an Advertising Removed event for each one dropped.
		 */
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		/* Remove when forced, or when the instance's lifetime
		 * has run out.
		 */
		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	/* Only reschedule when a request context was given, the device
	 * is powered and software advertising is not active.
	 */
	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		schedule_adv_instance(req, next_instance->instance, false);
}
1575
/* Prepare the controller for power-off: stop page/inquiry scanning and
 * advertising, halt discovery and abort every connection, all in a
 * single HCI request. Returns the hci_req_run() result (-ENODATA when
 * no commands needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* force == false: keep instances that have no timeout stored */
	clear_adv_instance(hdev, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1609
/* Set Powered command handler: powers the controller up or down. When
 * an auto-off is pending, powering on simply takes over ownership of
 * the already-powered device.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Auto-off was armed: cancel the pending power-off; if powering
	 * on, just report the device as powered.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1675
1676 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1677 {
1678 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1679
1680 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1681 sizeof(ev), skip);
1682 }
1683
/* Broadcast a New Settings event to every mgmt socket */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1688
/* Context passed through the pending-command iteration callbacks
 * (e.g. settings_rsp()) below.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen; settings_rsp() holds a ref */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1694
/* Pending-command callback: answer the command with the current
 * settings and free it. The first socket encountered is stashed (with
 * a held reference) in match->sk for the caller to use afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; use _free, not _remove, since the
	 * command is already off the list.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1710
1711 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1712 {
1713 u8 *status = data;
1714
1715 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1716 mgmt_pending_remove(cmd);
1717 }
1718
1719 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1720 {
1721 if (cmd->cmd_complete) {
1722 u8 *status = data;
1723
1724 cmd->cmd_complete(cmd, *status);
1725 mgmt_pending_remove(cmd);
1726
1727 return;
1728 }
1729
1730 cmd_status_rsp(cmd, data);
1731 }
1732
/* Default cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1738
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address block.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1744
1745 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1746 {
1747 if (!lmp_bredr_capable(hdev))
1748 return MGMT_STATUS_NOT_SUPPORTED;
1749 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1750 return MGMT_STATUS_REJECTED;
1751 else
1752 return MGMT_STATUS_SUCCESS;
1753 }
1754
1755 static u8 mgmt_le_support(struct hci_dev *hdev)
1756 {
1757 if (!lmp_le_capable(hdev))
1758 return MGMT_STATUS_NOT_SUPPORTED;
1759 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1760 return MGMT_STATUS_REJECTED;
1761 else
1762 return MGMT_STATUS_SUCCESS;
1763 }
1764
/* HCI completion handler for Set Discoverable: commit or roll back the
 * discoverable flag, arm the discoverable timeout, notify the caller
 * and refresh page scan and class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* HCI failure: report it and drop the limited flag that
		 * set_discoverable() may have set optimistically.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the timeout that will turn discoverability off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1822
/* Set Discoverable command handler. val 0x00 disables, 0x01 enables
 * general and 0x02 enables limited discoverable mode; cp->timeout is
 * the auto-off time in seconds (required for limited, forbidden when
 * disabling). Builds the IAC/scan-enable/advertising-data request and
 * finishes in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1987
/* Queue page-scan activity/type commands toggling "fast connectable"
 * mode: interlaced scan with a short interval when enabled, standard
 * scan with the default interval when disabled. Only applies to
 * BR/EDR-enabled controllers of HCI version 1.2 or later, and only
 * queues commands for values that actually change.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	/* 11.25 msec page scan window */
	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
2022
/* HCI completion handler for Set Connectable: commit the connectable
 * (and, on disable, discoverable) flag changes, notify the caller and
 * refresh page scan, advertising data and background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Disabling connectable also drops discoverable */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2072
2073 static int set_connectable_update_settings(struct hci_dev *hdev,
2074 struct sock *sk, u8 val)
2075 {
2076 bool changed = false;
2077 int err;
2078
2079 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2080 changed = true;
2081
2082 if (val) {
2083 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2084 } else {
2085 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2086 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2087 }
2088
2089 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2090 if (err < 0)
2091 return err;
2092
2093 if (changed) {
2094 hci_update_page_scan(hdev);
2095 hci_update_background_scan(hdev);
2096 return new_settings(hdev, sk);
2097 }
2098
2099 return 0;
2100 }
2101
/* Set Connectable command handler. When powered, programs scan enable
 * (BR/EDR) or refreshes advertising data/parameters (LE) and finishes
 * in set_connectable_complete(); otherwise only the flags are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag updates only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Inquiry scan is going away, so stop the
			 * discoverable timeout too.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing was queued, fall back to flag-only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2198
2199 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2200 u16 len)
2201 {
2202 struct mgmt_mode *cp = data;
2203 bool changed;
2204 int err;
2205
2206 BT_DBG("request for %s", hdev->name);
2207
2208 if (cp->val != 0x00 && cp->val != 0x01)
2209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2210 MGMT_STATUS_INVALID_PARAMS);
2211
2212 hci_dev_lock(hdev);
2213
2214 if (cp->val)
2215 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2216 else
2217 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2218
2219 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2220 if (err < 0)
2221 goto unlock;
2222
2223 if (changed)
2224 err = new_settings(hdev, sk);
2225
2226 unlock:
2227 hci_dev_unlock(hdev);
2228 return err;
2229 }
2230
/* Set Link Security command handler: program HCI authentication enable
 * on a powered BR/EDR controller, or just track the flag while powered
 * off. Completion of the Write Auth Enable command finishes the mgmt
 * command elsewhere.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: track the desired state in a flag only */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2299
/* Handle the Set Secure Simple Pairing mgmt command.
 *
 * When powered off only the HCI_SSP_ENABLED flag is toggled (and
 * HCI_HS_ENABLED is cleared when SSP is disabled, since High Speed
 * depends on SSP). When powered, HCI_OP_WRITE_SSP_MODE is sent and
 * the command completes asynchronously.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* Disabling SSP also clears High Speed; report a
			 * change if either flag was actually cleared.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: reply with current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug key mode */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2380
/* Handle the Set High Speed mgmt command.
 *
 * High Speed requires SSP to be enabled. Disabling HS is rejected
 * while the controller is powered; otherwise only the HCI_HS_ENABLED
 * flag is toggled (no HCI traffic is generated here).
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the SSP state underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2437
/* HCI request completion callback for Set LE.
 *
 * Responds to all pending Set LE commands: with an error status on
 * failure, or with the new settings (plus a New Settings event) on
 * success. On success the advertising/scan response data is also
 * refreshed if LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp collects the last responder's socket in match.sk
	 * so it can be skipped for the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
2477
/* Handle the Set Low Energy mgmt command.
 *
 * Disabling LE also clears all advertising instances. When the
 * controller is powered off (or already in the requested host LE
 * state) only the HCI_LE_ENABLED/HCI_ADVERTISING flags are updated;
 * otherwise an HCI_OP_WRITE_LE_HOST_SUPPORTED request is issued
 * (preceded by disabling advertising when switching LE off) and the
 * command completes via le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Switching LE off removes all advertising instances */
	if (!val)
		clear_adv_instance(hdev, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		/* No HCI traffic needed: just update the flags */
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set Advertising also touches LE host state, so both must be
	 * serialized against each other.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop active advertising before disabling LE support */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2581
2582 /* This is a helper function to test for pending mgmt commands that can
2583 * cause CoD or EIR HCI commands. We can only allow one such pending
2584 * mgmt command at a time since otherwise we cannot easily track what
2585 * the current values are, will be, and based on that calculate if a new
2586 * HCI command needs to be sent and if yes with what value.
2587 */
2588 static bool pending_eir_or_class(struct hci_dev *hdev)
2589 {
2590 struct mgmt_pending_cmd *cmd;
2591
2592 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2593 switch (cmd->opcode) {
2594 case MGMT_OP_ADD_UUID:
2595 case MGMT_OP_REMOVE_UUID:
2596 case MGMT_OP_SET_DEV_CLASS:
2597 case MGMT_OP_SET_POWERED:
2598 return true;
2599 }
2600 }
2601
2602 return false;
2603 }
2604
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; UUIDs sharing this 12-byte suffix can be
 * shortened to 16- or 32-bit form (see get_uuid_size below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2609
2610 static u8 get_uuid_size(const u8 *uuid)
2611 {
2612 u32 val;
2613
2614 if (memcmp(uuid, bluetooth_base_uuid, 12))
2615 return 128;
2616
2617 val = get_unaligned_le32(&uuid[12]);
2618 if (val > 0xffff)
2619 return 32;
2620
2621 return 16;
2622 }
2623
/* Complete the pending mgmt command identified by mgmt_op, using the
 * current device class (3 bytes) as the response payload. Does
 * nothing if no such command is pending.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2642
/* HCI request callback for Add UUID: forward the status to the
 * generic class/EIR completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2649
/* Handle the Add UUID mgmt command.
 *
 * Appends the UUID to hdev->uuids and issues an HCI request updating
 * the class of device and EIR data. If the request queues no HCI
 * commands (-ENODATA), the command is completed immediately;
 * otherwise it completes via add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Empty request: nothing to send, complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2707
2708 static bool enable_service_cache(struct hci_dev *hdev)
2709 {
2710 if (!hdev_is_powered(hdev))
2711 return false;
2712
2713 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2714 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2715 CACHE_TIMEOUT);
2716 return true;
2717 }
2718
2719 return false;
2720 }
2721
/* HCI request callback for Remove UUID: forward the status to the
 * generic class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2728
/* Handle the Remove UUID mgmt command.
 *
 * An all-zero UUID clears the whole list (possibly just re-arming the
 * service cache instead of touching the controller). Otherwise every
 * matching entry is removed; no match is an invalid-params error.
 * Class and EIR data are then updated via an HCI request that
 * completes through remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard clearing all UUIDs */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			/* Cache timer armed: defer the controller update */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to send, complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2807
/* HCI request callback for Set Device Class: forward the status to
 * the generic class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2814
/* Handle the Set Device Class mgmt command.
 *
 * Stores the major/minor class and, when powered, issues an HCI
 * request updating the class (and EIR if the service cache was
 * active). Completes immediately when powered off or when the
 * request queues no HCI commands; otherwise via set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and three highest major bits are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while synchronously cancelling the
		 * service cache work, since that work itself takes
		 * hdev's lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to send, complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2885
/* Handle the Load Link Keys mgmt command.
 *
 * Validates the key count against the maximum that fits in a u16
 * payload, checks the exact expected length and every key's address
 * type, then replaces the controller's entire link key store with
 * the supplied keys (skipping debug combination keys).
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate all entries before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2967
2968 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2969 u8 addr_type, struct sock *skip_sk)
2970 {
2971 struct mgmt_ev_device_unpaired ev;
2972
2973 bacpy(&ev.addr.bdaddr, bdaddr);
2974 ev.addr.type = addr_type;
2975
2976 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2977 skip_sk);
2978 }
2979
/* Handle the Unpair Device mgmt command.
 *
 * Removes the stored keys for the device (link key for BR/EDR; IRK
 * and LTK for LE) and optionally terminates an existing connection
 * when cp->disconnect is set. For LE, an ongoing SMP pairing is
 * aborted and auto-connection parameters are disabled. If a link is
 * to be terminated, the command completes asynchronously via the
 * pending command's addr_cmd_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the parameters can be dropped right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3110
/* Handle the Disconnect mgmt command.
 *
 * Looks up the BR/EDR or LE connection for the given address and
 * initiates disconnection; the command completes asynchronously via
 * the pending command's generic_cmd_complete.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3176
3177 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3178 {
3179 switch (link_type) {
3180 case LE_LINK:
3181 switch (addr_type) {
3182 case ADDR_LE_DEV_PUBLIC:
3183 return BDADDR_LE_PUBLIC;
3184
3185 default:
3186 /* Fallback to LE Random address type */
3187 return BDADDR_LE_RANDOM;
3188 }
3189
3190 default:
3191 /* Fallback to BR/EDR type */
3192 return BDADDR_BREDR;
3193 }
3194 }
3195
/* Handle the Get Connections mgmt command.
 *
 * Two passes over the connection hash: first count the MGMT-visible
 * connections to size the response, then fill in the addresses while
 * filtering out SCO/eSCO links. The response length is recalculated
 * afterwards since the filter can shrink the entry count.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		/* SCO/eSCO links are excluded; the slot written above is
		 * reused (or trimmed) because i is not advanced here.
		 */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3253
3254 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3255 struct mgmt_cp_pin_code_neg_reply *cp)
3256 {
3257 struct mgmt_pending_cmd *cmd;
3258 int err;
3259
3260 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3261 sizeof(*cp));
3262 if (!cmd)
3263 return -ENOMEM;
3264
3265 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3266 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3267 if (err < 0)
3268 mgmt_pending_remove(cmd);
3269
3270 return err;
3271 }
3272
/* Handle the PIN Code Reply mgmt command.
 *
 * Requires a powered controller and an existing ACL connection. If
 * the pending security level demands a 16-byte PIN but a shorter one
 * was supplied, a negative reply is sent to the controller instead
 * and the command fails with invalid-params. Otherwise the PIN is
 * forwarded via HCI_OP_PIN_CODE_REPLY and the command completes
 * asynchronously.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject toward the controller, then report the bad
		 * parameter back to the mgmt caller.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3334
3335 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3336 u16 len)
3337 {
3338 struct mgmt_cp_set_io_capability *cp = data;
3339
3340 BT_DBG("");
3341
3342 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3343 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3344 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3345
3346 hci_dev_lock(hdev);
3347
3348 hdev->io_capability = cp->io_capability;
3349
3350 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3351 hdev->io_capability);
3352
3353 hci_dev_unlock(hdev);
3354
3355 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3356 NULL, 0);
3357 }
3358
3359 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3360 {
3361 struct hci_dev *hdev = conn->hdev;
3362 struct mgmt_pending_cmd *cmd;
3363
3364 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3365 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3366 continue;
3367
3368 if (cmd->user_data != conn)
3369 continue;
3370
3371 return cmd;
3372 }
3373
3374 return NULL;
3375 }
3376
/* Complete a Pair Device command with the given status.
 *
 * Replies to the mgmt caller, detaches all connection callbacks so
 * no further events are delivered for this pairing, and drops the
 * references the pending command held on the connection.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3405
3406 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3407 {
3408 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3409 struct mgmt_pending_cmd *cmd;
3410
3411 cmd = find_pairing(conn);
3412 if (cmd) {
3413 cmd->cmd_complete(cmd, status);
3414 mgmt_pending_remove(cmd);
3415 }
3416 }
3417
/* Connection callback invoked when pairing finishes (any status):
 * complete and remove the matching pending Pair Device command.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3433
/* LE connection callback for pairing: only failures are handled
 * here; success is reported through mgmt_smp_complete() instead.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3452
3453 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3454 u16 len)
3455 {
3456 struct mgmt_cp_pair_device *cp = data;
3457 struct mgmt_rp_pair_device rp;
3458 struct mgmt_pending_cmd *cmd;
3459 u8 sec_level, auth_type;
3460 struct hci_conn *conn;
3461 int err;
3462
3463 BT_DBG("");
3464
3465 memset(&rp, 0, sizeof(rp));
3466 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3467 rp.addr.type = cp->addr.type;
3468
3469 if (!bdaddr_type_is_valid(cp->addr.type))
3470 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3471 MGMT_STATUS_INVALID_PARAMS,
3472 &rp, sizeof(rp));
3473
3474 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3475 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3476 MGMT_STATUS_INVALID_PARAMS,
3477 &rp, sizeof(rp));
3478
3479 hci_dev_lock(hdev);
3480
3481 if (!hdev_is_powered(hdev)) {
3482 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3483 MGMT_STATUS_NOT_POWERED, &rp,
3484 sizeof(rp));
3485 goto unlock;
3486 }
3487
3488 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3489 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3490 MGMT_STATUS_ALREADY_PAIRED, &rp,
3491 sizeof(rp));
3492 goto unlock;
3493 }
3494
3495 sec_level = BT_SECURITY_MEDIUM;
3496 auth_type = HCI_AT_DEDICATED_BONDING;
3497
3498 if (cp->addr.type == BDADDR_BREDR) {
3499 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3500 auth_type);
3501 } else {
3502 u8 addr_type = le_addr_type(cp->addr.type);
3503 struct hci_conn_params *p;
3504
3505 /* When pairing a new device, it is expected to remember
3506 * this device for future connections. Adding the connection
3507 * parameter information ahead of time allows tracking
3508 * of the slave preferred values and will speed up any
3509 * further connection establishment.
3510 *
3511 * If connection parameters already exist, then they
3512 * will be kept and this function does nothing.
3513 */
3514 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3515
3516 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3517 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3518
3519 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
3520 addr_type, sec_level,
3521 HCI_LE_CONN_TIMEOUT);
3522 }
3523
3524 if (IS_ERR(conn)) {
3525 int status;
3526
3527 if (PTR_ERR(conn) == -EBUSY)
3528 status = MGMT_STATUS_BUSY;
3529 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3530 status = MGMT_STATUS_NOT_SUPPORTED;
3531 else if (PTR_ERR(conn) == -ECONNREFUSED)
3532 status = MGMT_STATUS_REJECTED;
3533 else
3534 status = MGMT_STATUS_CONNECT_FAILED;
3535
3536 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3537 status, &rp, sizeof(rp));
3538 goto unlock;
3539 }
3540
3541 if (conn->connect_cfm_cb) {
3542 hci_conn_drop(conn);
3543 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3544 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3545 goto unlock;
3546 }
3547
3548 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3549 if (!cmd) {
3550 err = -ENOMEM;
3551 hci_conn_drop(conn);
3552 goto unlock;
3553 }
3554
3555 cmd->cmd_complete = pairing_complete;
3556
3557 /* For LE, just connecting isn't a proof that the pairing finished */
3558 if (cp->addr.type == BDADDR_BREDR) {
3559 conn->connect_cfm_cb = pairing_complete_cb;
3560 conn->security_cfm_cb = pairing_complete_cb;
3561 conn->disconn_cfm_cb = pairing_complete_cb;
3562 } else {
3563 conn->connect_cfm_cb = le_pairing_complete_cb;
3564 conn->security_cfm_cb = le_pairing_complete_cb;
3565 conn->disconn_cfm_cb = le_pairing_complete_cb;
3566 }
3567
3568 conn->io_capability = cp->io_cap;
3569 cmd->user_data = hci_conn_get(conn);
3570
3571 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3572 hci_conn_security(conn, sec_level, auth_type, true)) {
3573 cmd->cmd_complete(cmd, 0);
3574 mgmt_pending_remove(cmd);
3575 }
3576
3577 err = 0;
3578
3579 unlock:
3580 hci_dev_unlock(hdev);
3581 return err;
3582 }
3583
/* Cancel Pair Device command handler: aborts an in-flight Pair Device
 * command for the given peer address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pending Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same peer as the pending pairing.
	 * NOTE(review): only the bdaddr is compared, not addr->type —
	 * confirm whether the address type should be checked as well.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pending Pair Device command as cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3626
/* Shared implementation for all user pairing responses (PIN code,
 * user confirm and passkey replies, positive and negative). Replies
 * to @mgmt_op; LE responses are handled by SMP, BR/EDR responses are
 * forwarded to the controller via @hci_op.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection the response applies to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3697
/* PIN Code Negative Reply: thin wrapper around user_pairing_resp();
 * no passkey applies, hence the trailing 0.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3709
/* User Confirm Reply: validates the fixed command length, then
 * delegates to user_pairing_resp(); no passkey applies.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* This command has a fixed size; reject anything else */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3725
/* User Confirm Negative Reply: thin wrapper around user_pairing_resp();
 * no passkey applies, hence the trailing 0.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3737
/* User Passkey Reply: thin wrapper around user_pairing_resp(),
 * forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3749
/* User Passkey Negative Reply: thin wrapper around user_pairing_resp();
 * no passkey applies, hence the trailing 0.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3761
3762 static void update_name(struct hci_request *req)
3763 {
3764 struct hci_dev *hdev = req->hdev;
3765 struct hci_cp_write_local_name cp;
3766
3767 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3768
3769 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3770 }
3771
/* Completion callback for the HCI request issued by set_local_name():
 * completes the pending Set Local Name command with the outcome.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	else
		/* Echo the requested name back on success */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3799
/* Set Local Name command handler: stores the new name/short name and,
 * when powered, pushes the name to the controller (EIR and scan
 * response data included).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: store the name and notify listeners
		 * without issuing any HCI commands.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3868
/* Completion handler for the Read Local OOB (Ext) Data HCI request.
 * Copies the P-192 values (and, for the extended variant, P-256) into
 * the mgmt response and completes the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy response: trim the unused P-256 fields */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3927
/* Read Local OOB Data command handler: issues the plain or extended
 * HCI read depending on Secure Connections support; the result is
 * delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is an SSP feature */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* The extended variant also returns the P-256 values */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3978
/* Add Remote OOB Data command handler. The command comes in two
 * sizes: the legacy form with only P-192 values, and the extended
 * form with both P-192 and P-256 values; dispatch is done on @len.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer only, BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known size: malformed command */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4085
/* Remove Remote OOB Data command handler. BDADDR_ANY clears all
 * stored remote OOB data; any other address removes a single entry.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	/* Remote OOB data is only stored for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY acts as a wildcard: clear everything */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4122
4123 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4124 {
4125 struct mgmt_pending_cmd *cmd;
4126
4127 BT_DBG("status %d", status);
4128
4129 hci_dev_lock(hdev);
4130
4131 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4132 if (!cmd)
4133 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4134
4135 if (cmd) {
4136 cmd->cmd_complete(cmd, mgmt_status(status));
4137 mgmt_pending_remove(cmd);
4138 }
4139
4140 hci_dev_unlock(hdev);
4141 }
4142
4143 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4144 uint8_t *mgmt_status)
4145 {
4146 switch (type) {
4147 case DISCOV_TYPE_LE:
4148 *mgmt_status = mgmt_le_support(hdev);
4149 if (*mgmt_status)
4150 return false;
4151 break;
4152 case DISCOV_TYPE_INTERLEAVED:
4153 *mgmt_status = mgmt_le_support(hdev);
4154 if (*mgmt_status)
4155 return false;
4156 /* Intentional fall-through */
4157 case DISCOV_TYPE_BREDR:
4158 *mgmt_status = mgmt_bredr_support(hdev);
4159 if (*mgmt_status)
4160 return false;
4161 break;
4162 default:
4163 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4164 return false;
4165 }
4166
4167 return true;
4168 }
4169
/* Start Discovery command handler: validates the request, resets the
 * discovery filter and queues the actual discovery work; the command
 * completes asynchronously via mgmt_start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Actual discovery is started asynchronously from the request
	 * workqueue.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4227
/* Completion handler for Start Service Discovery: only the first byte
 * of the stored parameters is echoed back (presumably the discovery
 * type, matching the &cp->type replies used elsewhere — confirm
 * against struct mgmt_cp_start_service_discovery's layout).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4234
/* Start Service Discovery command handler: like start_discovery() but
 * additionally installs an RSSI threshold and UUID result filter.
 * The command has a variable length: sizeof(*cp) plus 16 bytes per
 * requested UUID.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that cannot overflow the u16 length math */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload length must match the declared UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Actual discovery is started asynchronously from the request
	 * workqueue.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4335
4336 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4337 {
4338 struct mgmt_pending_cmd *cmd;
4339
4340 BT_DBG("status %d", status);
4341
4342 hci_dev_lock(hdev);
4343
4344 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4345 if (cmd) {
4346 cmd->cmd_complete(cmd, mgmt_status(status));
4347 mgmt_pending_remove(cmd);
4348 }
4349
4350 hci_dev_unlock(hdev);
4351 }
4352
/* Stop Discovery command handler: queues the asynchronous stop; the
 * command completes via mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one the discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual stop happens asynchronously on the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4394
/* Confirm Name command handler: records whether the name of an
 * inquiry cache entry is already known, updating the cache's
 * name-resolution state accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense during active discovery */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Known name: no resolution needed, drop from the list */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Unknown name: queue the entry for name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4436
4437 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4438 u16 len)
4439 {
4440 struct mgmt_cp_block_device *cp = data;
4441 u8 status;
4442 int err;
4443
4444 BT_DBG("%s", hdev->name);
4445
4446 if (!bdaddr_type_is_valid(cp->addr.type))
4447 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4448 MGMT_STATUS_INVALID_PARAMS,
4449 &cp->addr, sizeof(cp->addr));
4450
4451 hci_dev_lock(hdev);
4452
4453 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4454 cp->addr.type);
4455 if (err < 0) {
4456 status = MGMT_STATUS_FAILED;
4457 goto done;
4458 }
4459
4460 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4461 sk);
4462 status = MGMT_STATUS_SUCCESS;
4463
4464 done:
4465 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4466 &cp->addr, sizeof(cp->addr));
4467
4468 hci_dev_unlock(hdev);
4469
4470 return err;
4471 }
4472
4473 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4474 u16 len)
4475 {
4476 struct mgmt_cp_unblock_device *cp = data;
4477 u8 status;
4478 int err;
4479
4480 BT_DBG("%s", hdev->name);
4481
4482 if (!bdaddr_type_is_valid(cp->addr.type))
4483 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4484 MGMT_STATUS_INVALID_PARAMS,
4485 &cp->addr, sizeof(cp->addr));
4486
4487 hci_dev_lock(hdev);
4488
4489 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4490 cp->addr.type);
4491 if (err < 0) {
4492 status = MGMT_STATUS_INVALID_PARAMS;
4493 goto done;
4494 }
4495
4496 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4497 sk);
4498 status = MGMT_STATUS_SUCCESS;
4499
4500 done:
4501 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4502 &cp->addr, sizeof(cp->addr));
4503
4504 hci_dev_unlock(hdev);
4505
4506 return err;
4507 }
4508
4509 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4510 u16 len)
4511 {
4512 struct mgmt_cp_set_device_id *cp = data;
4513 struct hci_request req;
4514 int err;
4515 __u16 source;
4516
4517 BT_DBG("%s", hdev->name);
4518
4519 source = __le16_to_cpu(cp->source);
4520
4521 if (source > 0x0002)
4522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4523 MGMT_STATUS_INVALID_PARAMS);
4524
4525 hci_dev_lock(hdev);
4526
4527 hdev->devid_source = source;
4528 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4529 hdev->devid_product = __le16_to_cpu(cp->product);
4530 hdev->devid_version = __le16_to_cpu(cp->version);
4531
4532 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4533 NULL, 0);
4534
4535 hci_req_init(&req, hdev);
4536 update_eir(&req);
4537 hci_req_run(&req, NULL);
4538
4539 hci_dev_unlock(hdev);
4540
4541 return err;
4542 }
4543
/* Request completion callback used when re-enabling an advertising
 * instance: it only logs the status, no further action is taken.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4549
/* Completion callback for the Set Advertising HCI request: syncs the
 * HCI_ADVERTISING flag with the controller state, replies to all
 * pending Set Advertising commands, and re-enables multi-instance
 * advertising if it was displaced by the setting being turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's advertising state in the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* No current instance: fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4613
/* Handler for the Set Advertising mgmt command.
 *
 * cp->val: 0x00 = disable, 0x01 = enable, 0x02 = enable connectable
 * advertising. Returns a mgmt status response on validation failure,
 * otherwise either replies directly (when no HCI traffic is needed) or
 * queues an HCI request completed by set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* LE must be supported and enabled for this command */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Collapse 0x01/0x02 to a plain on/off for the enable decision */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any instance-advertising timeout; Set Advertising takes over */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		update_inst_adv_data(&req, 0x00);
		update_inst_scan_rsp_data(&req, 0x00);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4714
4715 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4716 void *data, u16 len)
4717 {
4718 struct mgmt_cp_set_static_address *cp = data;
4719 int err;
4720
4721 BT_DBG("%s", hdev->name);
4722
4723 if (!lmp_le_capable(hdev))
4724 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4725 MGMT_STATUS_NOT_SUPPORTED);
4726
4727 if (hdev_is_powered(hdev))
4728 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4729 MGMT_STATUS_REJECTED);
4730
4731 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4732 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4733 return mgmt_cmd_status(sk, hdev->id,
4734 MGMT_OP_SET_STATIC_ADDRESS,
4735 MGMT_STATUS_INVALID_PARAMS);
4736
4737 /* Two most significant bits shall be set */
4738 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4739 return mgmt_cmd_status(sk, hdev->id,
4740 MGMT_OP_SET_STATIC_ADDRESS,
4741 MGMT_STATUS_INVALID_PARAMS);
4742 }
4743
4744 hci_dev_lock(hdev);
4745
4746 bacpy(&hdev->static_addr, &cp->bdaddr);
4747
4748 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4749 if (err < 0)
4750 goto unlock;
4751
4752 err = new_settings(hdev, sk);
4753
4754 unlock:
4755 hci_dev_unlock(hdev);
4756 return err;
4757 }
4758
4759 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4760 void *data, u16 len)
4761 {
4762 struct mgmt_cp_set_scan_params *cp = data;
4763 __u16 interval, window;
4764 int err;
4765
4766 BT_DBG("%s", hdev->name);
4767
4768 if (!lmp_le_capable(hdev))
4769 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4770 MGMT_STATUS_NOT_SUPPORTED);
4771
4772 interval = __le16_to_cpu(cp->interval);
4773
4774 if (interval < 0x0004 || interval > 0x4000)
4775 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4776 MGMT_STATUS_INVALID_PARAMS);
4777
4778 window = __le16_to_cpu(cp->window);
4779
4780 if (window < 0x0004 || window > 0x4000)
4781 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4782 MGMT_STATUS_INVALID_PARAMS);
4783
4784 if (window > interval)
4785 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4786 MGMT_STATUS_INVALID_PARAMS);
4787
4788 hci_dev_lock(hdev);
4789
4790 hdev->le_scan_interval = interval;
4791 hdev->le_scan_window = window;
4792
4793 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4794 NULL, 0);
4795
4796 /* If background scan is running, restart it so new parameters are
4797 * loaded.
4798 */
4799 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4800 hdev->discovery.state == DISCOVERY_STOPPED) {
4801 struct hci_request req;
4802
4803 hci_req_init(&req, hdev);
4804
4805 hci_req_add_le_scan_disable(&req);
4806 hci_req_add_le_passive_scan(&req);
4807
4808 hci_req_run(&req, NULL);
4809 }
4810
4811 hci_dev_unlock(hdev);
4812
4813 return err;
4814 }
4815
4816 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4817 u16 opcode)
4818 {
4819 struct mgmt_pending_cmd *cmd;
4820
4821 BT_DBG("status 0x%02x", status);
4822
4823 hci_dev_lock(hdev);
4824
4825 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4826 if (!cmd)
4827 goto unlock;
4828
4829 if (status) {
4830 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4831 mgmt_status(status));
4832 } else {
4833 struct mgmt_mode *cp = cmd->param;
4834
4835 if (cp->val)
4836 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4837 else
4838 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4839
4840 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4841 new_settings(hdev, cmd->sk);
4842 }
4843
4844 mgmt_pending_remove(cmd);
4845
4846 unlock:
4847 hci_dev_unlock(hdev);
4848 }
4849
/* Handler for the Set Fast Connectable mgmt command.
 *
 * Requires BR/EDR enabled and HCI version >= 1.2 (page scan parameter
 * support). When powered, queues a page-scan parameter change completed
 * by fast_connectable_complete(); when powered off, only toggles the
 * setting flag.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op if the requested state already matches the flag */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: just flip the flag, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4914
4915 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4916 {
4917 struct mgmt_pending_cmd *cmd;
4918
4919 BT_DBG("status 0x%02x", status);
4920
4921 hci_dev_lock(hdev);
4922
4923 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4924 if (!cmd)
4925 goto unlock;
4926
4927 if (status) {
4928 u8 mgmt_err = mgmt_status(status);
4929
4930 /* We need to restore the flag if related HCI commands
4931 * failed.
4932 */
4933 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4934
4935 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4936 } else {
4937 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4938 new_settings(hdev, cmd->sk);
4939 }
4940
4941 mgmt_pending_remove(cmd);
4942
4943 unlock:
4944 hci_dev_unlock(hdev);
4945 }
4946
/* Handler for the Set BR/EDR mgmt command (dual-mode controllers only).
 *
 * Enabling while powered queues fast-connectable, page-scan and
 * advertising-data updates, completed by set_bredr_complete().
 * Disabling while powered is rejected outright.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already active: just echo the settings back */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all the BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags. set_bredr_complete() restores it
	 * if the request fails.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5058
5059 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5060 {
5061 struct mgmt_pending_cmd *cmd;
5062 struct mgmt_mode *cp;
5063
5064 BT_DBG("%s status %u", hdev->name, status);
5065
5066 hci_dev_lock(hdev);
5067
5068 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5069 if (!cmd)
5070 goto unlock;
5071
5072 if (status) {
5073 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5074 mgmt_status(status));
5075 goto remove;
5076 }
5077
5078 cp = cmd->param;
5079
5080 switch (cp->val) {
5081 case 0x00:
5082 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5083 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5084 break;
5085 case 0x01:
5086 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5087 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5088 break;
5089 case 0x02:
5090 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5091 hci_dev_set_flag(hdev, HCI_SC_ONLY);
5092 break;
5093 }
5094
5095 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5096 new_settings(hdev, cmd->sk);
5097
5098 remove:
5099 mgmt_pending_remove(cmd);
5100 unlock:
5101 hci_dev_unlock(hdev);
5102 }
5103
/* Handler for the Set Secure Connections mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode. For BR/EDR
 * this issues HCI Write Secure Connections Host Support (completed by
 * sc_enable_complete()); otherwise only the host-side flags are toggled.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC needs either controller support or LE (host-side SC) */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On SC-capable BR/EDR controllers, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction needed: just toggle the host flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: reply without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5191
5192 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5193 void *data, u16 len)
5194 {
5195 struct mgmt_mode *cp = data;
5196 bool changed, use_changed;
5197 int err;
5198
5199 BT_DBG("request for %s", hdev->name);
5200
5201 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5203 MGMT_STATUS_INVALID_PARAMS);
5204
5205 hci_dev_lock(hdev);
5206
5207 if (cp->val)
5208 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5209 else
5210 changed = hci_dev_test_and_clear_flag(hdev,
5211 HCI_KEEP_DEBUG_KEYS);
5212
5213 if (cp->val == 0x02)
5214 use_changed = !hci_dev_test_and_set_flag(hdev,
5215 HCI_USE_DEBUG_KEYS);
5216 else
5217 use_changed = hci_dev_test_and_clear_flag(hdev,
5218 HCI_USE_DEBUG_KEYS);
5219
5220 if (hdev_is_powered(hdev) && use_changed &&
5221 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5222 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5223 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5224 sizeof(mode), &mode);
5225 }
5226
5227 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5228 if (err < 0)
5229 goto unlock;
5230
5231 if (changed)
5232 err = new_settings(hdev, sk);
5233
5234 unlock:
5235 hci_dev_unlock(hdev);
5236 return err;
5237 }
5238
5239 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5240 u16 len)
5241 {
5242 struct mgmt_cp_set_privacy *cp = cp_data;
5243 bool changed;
5244 int err;
5245
5246 BT_DBG("request for %s", hdev->name);
5247
5248 if (!lmp_le_capable(hdev))
5249 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5250 MGMT_STATUS_NOT_SUPPORTED);
5251
5252 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5253 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5254 MGMT_STATUS_INVALID_PARAMS);
5255
5256 if (hdev_is_powered(hdev))
5257 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5258 MGMT_STATUS_REJECTED);
5259
5260 hci_dev_lock(hdev);
5261
5262 /* If user space supports this command it is also expected to
5263 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5264 */
5265 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5266
5267 if (cp->privacy) {
5268 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5269 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5270 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5271 } else {
5272 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5273 memset(hdev->irk, 0, sizeof(hdev->irk));
5274 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5275 }
5276
5277 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5278 if (err < 0)
5279 goto unlock;
5280
5281 if (changed)
5282 err = new_settings(hdev, sk);
5283
5284 unlock:
5285 hci_dev_unlock(hdev);
5286 return err;
5287 }
5288
5289 static bool irk_is_valid(struct mgmt_irk_info *irk)
5290 {
5291 switch (irk->addr.type) {
5292 case BDADDR_LE_PUBLIC:
5293 return true;
5294
5295 case BDADDR_LE_RANDOM:
5296 /* Two most significant bits shall be set */
5297 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5298 return false;
5299 return true;
5300 }
5301
5302 return false;
5303 }
5304
/* Handler for the Load IRKs mgmt command: validates the command payload,
 * then atomically replaces the device's IRK store with the given list
 * and enables RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps irk_count * sizeof(entry) within a u16 so the
	 * expected_len computation below cannot overflow.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the existing IRK list so a
	 * bad entry cannot leave the store half-replaced.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5366
5367 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5368 {
5369 if (key->master != 0x00 && key->master != 0x01)
5370 return false;
5371
5372 switch (key->addr.type) {
5373 case BDADDR_LE_PUBLIC:
5374 return true;
5375
5376 case BDADDR_LE_RANDOM:
5377 /* Two most significant bits shall be set */
5378 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5379 return false;
5380 return true;
5381 }
5382
5383 return false;
5384 }
5385
/* Handler for the Load Long Term Keys mgmt command: validates the
 * payload, then replaces the device's LTK store with the given list.
 * Entries with an unknown type — and P-256 debug keys — are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps key_count * sizeof(entry) within a u16 so the
	 * expected_len computation below cannot overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before clearing the existing key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): no break here, so control falls
			 * through to default and the entry is skipped,
			 * making the two assignments above dead stores.
			 * This looks intentional (debug LTKs must not be
			 * loaded) — confirm and mark with an explicit
			 * fall-through annotation.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			        NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5472
5473 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5474 {
5475 struct hci_conn *conn = cmd->user_data;
5476 struct mgmt_rp_get_conn_info rp;
5477 int err;
5478
5479 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5480
5481 if (status == MGMT_STATUS_SUCCESS) {
5482 rp.rssi = conn->rssi;
5483 rp.tx_power = conn->tx_power;
5484 rp.max_tx_power = conn->max_tx_power;
5485 } else {
5486 rp.rssi = HCI_RSSI_INVALID;
5487 rp.tx_power = HCI_TX_POWER_INVALID;
5488 rp.max_tx_power = HCI_TX_POWER_INVALID;
5489 }
5490
5491 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5492 status, &rp, sizeof(rp));
5493
5494 hci_conn_drop(conn);
5495 hci_conn_put(conn);
5496
5497 return err;
5498 }
5499
/* HCI request completion callback for the RSSI/TX-power refresh issued
 * by get_conn_info(): recovers the connection handle from the last sent
 * command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() sends the reply and drops the conn refs */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5552
/* Handler for the Get Connection Information mgmt command.
 *
 * Replies from cached RSSI/TX-power values when they are fresh enough;
 * otherwise issues Read RSSI (and Read Transmit Power Level as needed)
 * and defers the reply to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one query per connection may be pending at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* hdev->lock is held across hci_req_run() and
		 * mgmt_pending_add(), and the completion callback takes the
		 * same lock, so it cannot run before the cmd exists.
		 */
		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are released by conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5673
5674 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5675 {
5676 struct hci_conn *conn = cmd->user_data;
5677 struct mgmt_rp_get_clock_info rp;
5678 struct hci_dev *hdev;
5679 int err;
5680
5681 memset(&rp, 0, sizeof(rp));
5682 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5683
5684 if (status)
5685 goto complete;
5686
5687 hdev = hci_dev_get(cmd->index);
5688 if (hdev) {
5689 rp.local_clock = cpu_to_le32(hdev->clock);
5690 hci_dev_put(hdev);
5691 }
5692
5693 if (conn) {
5694 rp.piconet_clock = cpu_to_le32(conn->clock);
5695 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5696 }
5697
5698 complete:
5699 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5700 sizeof(rp));
5701
5702 if (conn) {
5703 hci_conn_drop(conn);
5704 hci_conn_put(conn);
5705 }
5706
5707 return err;
5708 }
5709
5710 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5711 {
5712 struct hci_cp_read_clock *hci_cp;
5713 struct mgmt_pending_cmd *cmd;
5714 struct hci_conn *conn;
5715
5716 BT_DBG("%s status %u", hdev->name, status);
5717
5718 hci_dev_lock(hdev);
5719
5720 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5721 if (!hci_cp)
5722 goto unlock;
5723
5724 if (hci_cp->which) {
5725 u16 handle = __le16_to_cpu(hci_cp->handle);
5726 conn = hci_conn_hash_lookup_handle(hdev, handle);
5727 } else {
5728 conn = NULL;
5729 }
5730
5731 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5732 if (!cmd)
5733 goto unlock;
5734
5735 cmd->cmd_complete(cmd, mgmt_status(status));
5736 mgmt_pending_remove(cmd);
5737
5738 unlock:
5739 hci_dev_unlock(hdev);
5740 }
5741
/* Handler for the Get Clock Information mgmt command (BR/EDR only).
 *
 * Always reads the local clock; when a peer address is given, also reads
 * the piconet clock of that connection. The reply is sent from
 * clock_info_cmd_complete() via get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a connected peer for the piconet clock */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00, handle = 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are released by clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5817
5818 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5819 {
5820 struct hci_conn *conn;
5821
5822 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5823 if (!conn)
5824 return false;
5825
5826 if (conn->dst_type != type)
5827 return false;
5828
5829 if (conn->state != BT_CONNECTED)
5830 return false;
5831
5832 return true;
5833 }
5834
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the connection parameter entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the auto connect policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it currently sits on;
	 * it is re-added below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when there isn't an
		 * established one already.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5879
5880 static void device_added(struct sock *sk, struct hci_dev *hdev,
5881 bdaddr_t *bdaddr, u8 type, u8 action)
5882 {
5883 struct mgmt_ev_device_added ev;
5884
5885 bacpy(&ev.addr.bdaddr, bdaddr);
5886 ev.addr.type = type;
5887 ev.action = action;
5888
5889 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5890 }
5891
/* Handle the Add Device mgmt command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the device is put on the whitelist. For LE addresses the
 * action is mapped to an auto connect policy and stored in the connection
 * parameters. Replies with the address on both success and failure.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Reject unknown address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_update_page_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5979
5980 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5981 bdaddr_t *bdaddr, u8 type)
5982 {
5983 struct mgmt_ev_device_removed ev;
5984
5985 bacpy(&ev.addr.bdaddr, bdaddr);
5986 ev.addr.type = type;
5987
5988 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5989 }
5990
/* Handle the Remove Device mgmt command.
 *
 * A specific address removes that single device: from the whitelist for
 * BR/EDR, or its connection parameters for LE. The wildcard BDADDR_ANY
 * (with address type 0) clears the whole whitelist and all non-disabled
 * LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal path */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the whitelist */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal path: clear everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-flight explicit connect,
			 * but demote them so they are cleaned up afterwards.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6119
/* Handle the Load Connection Parameters mgmt command.
 *
 * Replaces the stored LE connection parameters (after clearing any
 * disabled entries) with the list supplied by userspace. Individual
 * entries with an invalid address type or out-of-spec values are
 * skipped with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared parameter count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		/* Returns the existing entry if one is already stored */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6205
/* Handle the Set External Configuration mgmt command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips the overall
 * configured state, the controller is moved between the configured and
 * unconfigured index lists (which is visible to userspace as index
 * removed/added events).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration changes are only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed reflects whether the flag actually transitioned */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* Re-home the index if the configured state flipped */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: bring it up via power_on */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6261
/* Handle the Set Public Address mgmt command.
 *
 * Stores the address to be programmed into the controller via the
 * driver's set_bdaddr callback. Only allowed while powered off and on
 * drivers that provide set_bdaddr. If the controller thereby becomes
 * fully configured, it is re-announced as a configured index and
 * powered up for initialization.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The all-zero address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Unconfigured controllers notify about the changed option */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Transition from unconfigured to configured index */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6313
6314 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6315 u8 data_len)
6316 {
6317 eir[eir_len++] = sizeof(type) + data_len;
6318 eir[eir_len++] = type;
6319 memcpy(&eir[eir_len], data, data_len);
6320 eir_len += data_len;
6321
6322 return eir_len;
6323 }
6324
/* HCI completion callback for the Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().
 *
 * Builds the EIR payload for the Read Local OOB Extended Data reply from
 * whichever hash/randomizer pairs the controller returned (P-192 only for
 * the legacy command, P-256 and optionally P-192 for the extended one),
 * sends the command reply and mirrors it as a Local OOB Data Updated
 * event to other interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI failure: reply with empty EIR and the mapped status */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only the P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-dev field + two 18-byte fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256, plus P-192 unless SC-only mode */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send the reply without building any EIR fields */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6435
6436 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6437 struct mgmt_cp_read_local_oob_ext_data *cp)
6438 {
6439 struct mgmt_pending_cmd *cmd;
6440 struct hci_request req;
6441 int err;
6442
6443 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6444 cp, sizeof(*cp));
6445 if (!cmd)
6446 return -ENOMEM;
6447
6448 hci_req_init(&req, hdev);
6449
6450 if (bredr_sc_enabled(hdev))
6451 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6452 else
6453 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6454
6455 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6456 if (err < 0) {
6457 mgmt_pending_remove(cmd);
6458 return err;
6459 }
6460
6461 return 0;
6462 }
6463
/* Handle the Read Local OOB Extended Data mgmt command.
 *
 * For the BR/EDR type with SSP enabled the data must be fetched from the
 * controller, so the work is delegated to read_local_ssp_oob_req() and
 * completes asynchronously. All other cases (LE, SSP disabled, errors)
 * build the EIR reply synchronously and also emit a Local OOB Data
 * Updated event on success.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: determine the status and the worst-case EIR size
	 * so the reply buffer can be allocated before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR fields */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs data from the controller; reply comes from
			 * the completion handler unless queueing failed.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the LE address type: 0x01 = random
		 * (static), 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6619
6620 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6621 {
6622 u32 flags = 0;
6623
6624 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6625 flags |= MGMT_ADV_FLAG_DISCOV;
6626 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6627 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6628
6629 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6630 flags |= MGMT_ADV_FLAG_TX_POWER;
6631
6632 return flags;
6633 }
6634
/* Handle the Read Advertising Features mgmt command.
 *
 * Reports the supported flags, data size limits, maximum instance count
 * and — when instance advertising is active — the list of currently
 * configured instance identifiers (one byte each, appended to the reply).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err, i;
	bool instance;
	struct adv_info *adv_instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp);

	/* One extra byte per configured instance identifier */
	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	if (instance)
		rp_len += hdev->adv_instance_cnt;

	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;

	if (instance) {
		i = 0;
		/* Guard against the list growing past the counted size */
		list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
			if (i >= hdev->adv_instance_cnt)
				break;

			rp->instance[i] = adv_instance->instance;
			i++;
		}
		rp->num_instances = hdev->adv_instance_cnt;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6695
6696 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6697 u8 len, bool is_adv_data)
6698 {
6699 u8 max_len = HCI_MAX_AD_LENGTH;
6700 int i, cur_len;
6701 bool flags_managed = false;
6702 bool tx_power_managed = false;
6703 u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6704 MGMT_ADV_FLAG_MANAGED_FLAGS;
6705
6706 if (is_adv_data && (adv_flags & flags_params)) {
6707 flags_managed = true;
6708 max_len -= 3;
6709 }
6710
6711 if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6712 tx_power_managed = true;
6713 max_len -= 3;
6714 }
6715
6716 if (len > max_len)
6717 return false;
6718
6719 /* Make sure that the data is correctly formatted. */
6720 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6721 cur_len = data[i];
6722
6723 if (flags_managed && data[i + 1] == EIR_FLAGS)
6724 return false;
6725
6726 if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6727 return false;
6728
6729 /* If the current field length would exceed the total data
6730 * length, then it's invalid.
6731 */
6732 if (i + cur_len >= len)
6733 return false;
6734 }
6735
6736 return true;
6737 }
6738
/* HCI completion callback for the request queued by add_advertising().
 *
 * On failure, every still-pending instance is torn down (and advertised
 * as removed); on success, pending instances are marked as committed.
 * Finally the originating mgmt command, if still around, is completed.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	if (status)
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* Commit or roll back all instances that were added but not yet
	 * confirmed by the controller.
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer if it targets this instance */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6793
/* Called when the timeout of the currently advertised instance expires.
 * Removes the instance and, if it was the last one, disables advertising
 * altogether.
 */
void mgmt_adv_timeout_expired(struct hci_dev *hdev)
{
	u8 instance;
	struct hci_request req;

	hdev->adv_instance_timeout = 0;

	/* Nothing to do if no instance is currently being advertised */
	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		return;

	hci_dev_lock(hdev);
	hci_req_init(&req, hdev);

	clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		disable_advertising(&req);

	/* Only issue the request if clearing/disabling queued commands */
	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);
}
6818
/* Handle the Add Advertising mgmt command.
 *
 * Validates the instance number, flags and TLV payloads, stores the
 * instance, and — when the controller is powered, legacy advertising is
 * not active, and an instance needs (re)scheduling — kicks off the HCI
 * request, completing asynchronously via add_advertising_complete().
 * Otherwise replies with success immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1..HCI_MAX_ADV_INSTANCES; 0 is reserved */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be enforced while the controller is off */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Advertising data and scan response data are concatenated in
	 * cp->data; validate each part separately.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		advertising_added(sk, hdev, cp->instance);

	hci_dev_set_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6957
/* HCI request completion callback for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Responds to the pending mgmt command, if one is still queued, and
 * always reports success regardless of @status (see comment below).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	/* Echo back the instance the caller asked to remove */
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6987
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes the advertising instance named in the request (instance 0
 * means all instances) and disables advertising if none remain. The
 * response is sent immediately when no HCI traffic is required,
 * otherwise from remove_advertising_complete() once the queued HCI
 * commands have run.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising
	 * instance (0 requests removal of all instances).
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Reject while another command that affects advertising state
	 * is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if instance advertising was never enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	clear_adv_instance(hdev, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7059
/* Dispatch table for mgmt commands, indexed by mgmt opcode (entry 0 is
 * a placeholder since opcode 0x0000 is invalid). Each entry gives the
 * handler, the expected parameter size (a minimum when HCI_MGMT_VAR_LEN
 * is set) and flags describing when the command is permitted: without a
 * controller index (HCI_MGMT_NO_HDEV), on unconfigured controllers
 * (HCI_MGMT_UNCONFIGURED), or from untrusted sockets
 * (HCI_MGMT_UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};
7148
/* Notify mgmt listeners that a new controller index has appeared.
 *
 * Raw devices are never exposed over mgmt. For BR/EDR controllers a
 * legacy INDEX_ADDED (or UNCONF_INDEX_ADDED) event is sent first, then
 * an extended index event carrying the controller type and bus is sent
 * for BR/EDR and AMP controllers; other device types are ignored.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7180
/* Notify mgmt listeners that a controller index has gone away.
 *
 * Mirror image of mgmt_index_added(). For BR/EDR controllers all
 * pending mgmt commands are first failed with INVALID_INDEX before
 * the removal events are emitted.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Fail every still-pending command for this index */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7215
7216 /* This function requires the caller holds hdev->lock */
7217 static void restart_le_actions(struct hci_dev *hdev)
7218 {
7219 struct hci_conn_params *p;
7220
7221 list_for_each_entry(p, &hdev->le_conn_params, list) {
7222 /* Needed for AUTO_OFF case where might not "really"
7223 * have been powered off.
7224 */
7225 list_del_init(&p->action);
7226
7227 switch (p->auto_connect) {
7228 case HCI_AUTO_CONN_DIRECT:
7229 case HCI_AUTO_CONN_ALWAYS:
7230 list_add(&p->action, &hdev->pend_le_conns);
7231 break;
7232 case HCI_AUTO_CONN_REPORT:
7233 list_add(&p->action, &hdev->pend_le_reports);
7234 break;
7235 default:
7236 break;
7237 }
7238 }
7239 }
7240
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(). On success, registers SMP channels, restores
 * the LE action lists and restarts background scanning; in all cases
 * it answers any pending Set Powered commands and emits New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);

		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
7270
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after powering on: SSP/SC modes, LE host
 * support, advertising data and instances, link security, page scan,
 * class, name and EIR. Returns the result of hci_req_run(), with
 * powered_complete() as the completion callback.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	struct adv_info *adv_instance;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and, if applicable, Secure Connections) on the
	 * controller when the host setting is on but the controller's
	 * host feature bit is not yet set.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
		    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		     !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		/* If instance advertising is active but no current
		 * instance is selected, pick the first stored one.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
		    hdev->cur_adv_instance == 0x00 &&
		    !list_empty(&hdev->adv_instances)) {
			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			hdev->cur_adv_instance = adv_instance->instance;
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);
		else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
			 hdev->cur_adv_instance)
			schedule_adv_instance(&req, hdev->cur_adv_instance,
					      true);
	}

	/* Sync the authentication-enable setting with the mgmt flag */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7355
/* Called from the HCI core when the controller's power state changes.
 *
 * On power-on, defers everything to powered_update_hci() when it was
 * able to queue HCI work (powered_complete() will respond later). On
 * power-off, fails all pending commands with an appropriate status
 * and, if the class of device was non-zero, announces it as cleared.
 * Always ends by emitting New Settings.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* 0 means HCI commands were queued; the completion
		 * callback will answer the pending Set Powered commands.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a cleared class of device if it was set before */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7402
7403 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7404 {
7405 struct mgmt_pending_cmd *cmd;
7406 u8 status;
7407
7408 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7409 if (!cmd)
7410 return;
7411
7412 if (err == -ERFKILL)
7413 status = MGMT_STATUS_RFKILLED;
7414 else
7415 status = MGMT_STATUS_FAILED;
7416
7417 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7418
7419 mgmt_pending_remove(cmd);
7420 }
7421
/* Timer callback for the discoverable timeout: clears the discoverable
 * flags, restores page-scan-only mode on BR/EDR, refreshes the class
 * of device (and, for Set Advertising users, the advertising data),
 * then emits New Settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Drop inquiry scan, keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7458
7459 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7460 bool persistent)
7461 {
7462 struct mgmt_ev_new_link_key ev;
7463
7464 memset(&ev, 0, sizeof(ev));
7465
7466 ev.store_hint = persistent;
7467 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7468 ev.key.addr.type = BDADDR_BREDR;
7469 ev.key.type = key->type;
7470 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7471 ev.key.pin_len = key->pin_len;
7472
7473 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7474 }
7475
7476 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7477 {
7478 switch (ltk->type) {
7479 case SMP_LTK:
7480 case SMP_LTK_SLAVE:
7481 if (ltk->authenticated)
7482 return MGMT_LTK_AUTHENTICATED;
7483 return MGMT_LTK_UNAUTHENTICATED;
7484 case SMP_LTK_P256:
7485 if (ltk->authenticated)
7486 return MGMT_LTK_P256_AUTH;
7487 return MGMT_LTK_P256_UNAUTH;
7488 case SMP_LTK_P256_DEBUG:
7489 return MGMT_LTK_P256_DEBUG;
7490 }
7491
7492 return MGMT_LTK_UNAUTHENTICATED;
7493 }
7494
/* Emit a New Long Term Key event. The store hint is forced to zero for
 * non-identity random addresses (see comment below); only the bytes
 * covered by the negotiated encryption key size are copied, the rest
 * of the value is zeroed.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key generated as master */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7537
7538 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7539 {
7540 struct mgmt_ev_new_irk ev;
7541
7542 memset(&ev, 0, sizeof(ev));
7543
7544 ev.store_hint = persistent;
7545
7546 bacpy(&ev.rpa, &irk->rpa);
7547 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7548 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7549 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7550
7551 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7552 }
7553
/* Emit a New Connection Signature Resolving Key event. Like for LTKs,
 * the store hint is forced to zero for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7583
/* Emit a New Connection Parameter event for an LE device, so user
 * space can decide whether to store the updated parameters. Only
 * identity addresses are reported.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7604
/* Emit a Device Connected event. The variable-length EIR portion of
 * the event is built in a stack buffer after the fixed header: for LE
 * connections it is the advertising data, otherwise the remote name
 * and class of device (when available).
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Scratch space for the fixed event header plus EIR data */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
7641
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command and hand its socket (with an extra reference taken) back to
 * the caller through @data, so the Device Disconnected event can skip
 * that socket.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
7653
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command after its connection went away, sending the Device Unpaired
 * event followed by the command response. @data is the hci_dev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7664
7665 bool mgmt_powering_down(struct hci_dev *hdev)
7666 {
7667 struct mgmt_pending_cmd *cmd;
7668 struct mgmt_mode *cp;
7669
7670 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7671 if (!cmd)
7672 return false;
7673
7674 cp = cmd->param;
7675 if (!cp->val)
7676 return true;
7677
7678 return false;
7679 }
7680
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands for the connection. When this is the last
 * connection during a power-down, the deferred power-off work is
 * kicked immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections announced via Device Connected are reported */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp sets sk to the requester's socket so the event
	 * below is not echoed back to it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7716
7717 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7718 u8 link_type, u8 addr_type, u8 status)
7719 {
7720 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7721 struct mgmt_cp_disconnect *cp;
7722 struct mgmt_pending_cmd *cmd;
7723
7724 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7725 hdev);
7726
7727 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7728 if (!cmd)
7729 return;
7730
7731 cp = cmd->param;
7732
7733 if (bacmp(bdaddr, &cp->addr.bdaddr))
7734 return;
7735
7736 if (cp->addr.type != bdaddr_type)
7737 return;
7738
7739 cmd->cmd_complete(cmd, mgmt_status(status));
7740 mgmt_pending_remove(cmd);
7741 }
7742
/* Emit a Connect Failed event with the mapped status. Like in
 * mgmt_device_disconnected(), a power-down that was waiting on this
 * last connection is kicked immediately.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7762
/* Emit a PIN Code Request event for a BR/EDR pairing. @secure tells
 * user space whether a 16-digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
7773
7774 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7775 u8 status)
7776 {
7777 struct mgmt_pending_cmd *cmd;
7778
7779 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7780 if (!cmd)
7781 return;
7782
7783 cmd->cmd_complete(cmd, mgmt_status(status));
7784 mgmt_pending_remove(cmd);
7785 }
7786
7787 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7788 u8 status)
7789 {
7790 struct mgmt_pending_cmd *cmd;
7791
7792 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7793 if (!cmd)
7794 return;
7795
7796 cmd->cmd_complete(cmd, mgmt_status(status));
7797 mgmt_pending_remove(cmd);
7798 }
7799
/* Emit a User Confirmation Request event carrying the numeric
 * comparison @value; @confirm_hint tells user space whether it only
 * needs a yes/no confirmation rather than displaying the value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7816
/* Emit a User Passkey Request event asking user space to supply the
 * passkey for the given remote device.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7830
/* Common completion helper for the user confirm/passkey reply
 * commands: complete the pending command for @opcode with the mapped
 * HCI status. Returns -ENOENT when no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
7846
/* Completion entry point for User Confirmation Reply */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Completion entry point for User Confirmation Negative Reply */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Completion entry point for User Passkey Reply */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Completion entry point for User Passkey Negative Reply */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7876
/* Emit a Passkey Notify event so user space can display the passkey;
 * @entered reports how many digits the remote has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
7892
/* Report an authentication failure on a connection: emit the Auth
 * Failed event (skipping the pairing requester's socket, if any) and
 * complete the pending pair command with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7913
/* HCI completion for Write Auth Enable: on error, fail all pending Set
 * Link Security commands; on success, sync the HCI_LINK_SECURITY flag
 * with the controller's HCI_AUTH state, answer the pending commands
 * and emit New Settings if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed is true only when the flag's value actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7940
/* Queue an HCI Write EIR command that clears the extended inquiry
 * response, and reset the cached copy in hdev->eir. No-op when the
 * controller doesn't support extended inquiry.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7955
/* HCI completion for Write SSP Mode: sync the HCI_SSP_ENABLED (and
 * dependent HCI_HS_ENABLED) flags with the requested @enable, answer
 * pending Set SSP commands, emit New Settings when a flag changed, and
 * finally update or clear the EIR to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* The command failed; roll back the optimistic flag set
		 * and announce the reverted settings if needed.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP also disables High Speed; changed must
		 * reflect whether either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8008
8009 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8010 {
8011 struct cmd_lookup *match = data;
8012
8013 if (match->sk == NULL) {
8014 match->sk = cmd->sk;
8015 sock_hold(match->sk);
8016 }
8017 }
8018
/* HCI completion for class-of-device updates: find the socket of any
 * pending Set Device Class / Add UUID / Remove UUID command (all of
 * which modify the class) and, on success, broadcast the new class,
 * skipping that socket.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
8035
/* Called when a local name change completes. Emits Local Name Changed
 * unless the change is part of a power-on sequence. @name is expected
 * to be HCI_MAX_NAME_LENGTH bytes.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	/* Nothing to report if the controller rejected the update */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* The change did not come through mgmt; cache the name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originating socket when the change came via mgmt */
	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8062
8063 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8064 {
8065 int i;
8066
8067 for (i = 0; i < uuid_count; i++) {
8068 if (!memcmp(uuid, uuids[i], 16))
8069 return true;
8070 }
8071
8072 return false;
8073 }
8074
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return true
 * as soon as any UUID field contains an entry matching one of the
 * @uuid_count 128-bit UUIDs in @uuids. 16-bit and 32-bit UUIDs are
 * expanded to 128-bit form via the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* bytes covering type + data */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian entries fill bytes 12-13
			 * of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian entries fill bytes 12-15
			 * of the base UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8129
8130 static void restart_le_scan(struct hci_dev *hdev)
8131 {
8132 /* If controller is not scanning we are done. */
8133 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8134 return;
8135
8136 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8137 hdev->discovery.scan_start +
8138 hdev->discovery.scan_duration))
8139 return;
8140
8141 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8142 DISCOV_LE_RESTART_DELAY);
8143 }
8144
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true if the result should be
 * reported, false if it must be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR data and
		 * the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8189
/* Report a found remote device to userspace as a Device Found event.
 * The event payload is assembled as: EIR/advertising data, an optional
 * appended Class of Device field, then the scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only if the data doesn't already
	 * carry one; this may grow eir_len by up to 5 bytes (accounted
	 * for in the size check above).
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8258
8259 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8260 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8261 {
8262 struct mgmt_ev_device_found *ev;
8263 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8264 u16 eir_len;
8265
8266 ev = (struct mgmt_ev_device_found *) buf;
8267
8268 memset(buf, 0, sizeof(buf));
8269
8270 bacpy(&ev->addr.bdaddr, bdaddr);
8271 ev->addr.type = link_to_bdaddr(link_type, addr_type);
8272 ev->rssi = rssi;
8273
8274 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8275 name_len);
8276
8277 ev->eir_len = cpu_to_le16(eir_len);
8278
8279 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8280 }
8281
/* Notify userspace that discovery has started (@discovering != 0) or
 * stopped, including the discovery type currently configured.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	/* Zero the whole event first so no stale stack bytes are sent */
	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
8294
/* Completion callback for the re-enable advertising request below;
 * only logs the resulting status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
8299
/* Re-enable advertising after something (e.g. a disconnection) turned
 * it off while it is still supposed to be active.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	/* Bail out unless advertising — legacy or instance based — is
	 * meant to be on.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		/* Instance advertising: reschedule the current instance */
		schedule_adv_instance(&req, instance, true);
	} else {
		/* Legacy advertising: refresh adv and scan response data
		 * before switching advertising back on.
		 */
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}
8323
/* Registration record for the mgmt control channel: routes raw control
 * messages to the mgmt_handlers table and initializes per-hdev state.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8330
/* Register the mgmt control channel; returns 0 or a negative errno. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8335
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}