]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 14
42
/* Opcodes advertised to trusted sockets via MGMT_OP_READ_COMMANDS (see
 * read_commands() below). Untrusted sockets get the smaller
 * mgmt_untrusted_commands table instead.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};
110
/* Events advertised to trusted sockets via MGMT_OP_READ_COMMANDS (see
 * read_commands() below).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
148
/* Subset of opcodes reported to sockets without HCI_SOCK_TRUSTED set -
 * read-only informational commands only.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
157
/* Subset of events reported to sockets without HCI_SOCK_TRUSTED set. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
171
/* 2 seconds, expressed in jiffies (usage is outside this chunk). */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16-byte all-zero key value, used for comparisons against blank keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
176
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; mgmt_status() falls back to
 * MGMT_STATUS_FAILED for any code beyond the end of the table.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
241
242 static u8 mgmt_status(u8 hci_status)
243 {
244 if (hci_status < ARRAY_SIZE(mgmt_status_table))
245 return mgmt_status_table[hci_status];
246
247 return MGMT_STATUS_FAILED;
248 }
249
/* Send an index-related event on the control channel, filtered by the
 * given socket flag, without skipping any socket.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
256
/* Send an event on the control channel only to sockets with the given
 * flag set, optionally skipping @skip_sk (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
263
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270
271 static u8 le_addr_type(u8 mgmt_addr_type)
272 {
273 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 return ADDR_LE_DEV_PUBLIC;
275 else
276 return ADDR_LE_DEV_RANDOM;
277 }
278
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision implemented by this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
286
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface version
 * and revision. Does not touch any controller state.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
299
300 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
301 u16 data_len)
302 {
303 struct mgmt_rp_read_commands *rp;
304 u16 num_commands, num_events;
305 size_t rp_size;
306 int i, err;
307
308 BT_DBG("sock %p", sk);
309
310 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
311 num_commands = ARRAY_SIZE(mgmt_commands);
312 num_events = ARRAY_SIZE(mgmt_events);
313 } else {
314 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
315 num_events = ARRAY_SIZE(mgmt_untrusted_events);
316 }
317
318 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
319
320 rp = kmalloc(rp_size, GFP_KERNEL);
321 if (!rp)
322 return -ENOMEM;
323
324 rp->num_commands = cpu_to_le16(num_commands);
325 rp->num_events = cpu_to_le16(num_events);
326
327 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
328 __le16 *opcode = rp->opcodes;
329
330 for (i = 0; i < num_commands; i++, opcode++)
331 put_unaligned_le16(mgmt_commands[i], opcode);
332
333 for (i = 0; i < num_events; i++, opcode++)
334 put_unaligned_le16(mgmt_events[i], opcode);
335 } else {
336 __le16 *opcode = rp->opcodes;
337
338 for (i = 0; i < num_commands; i++, opcode++)
339 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
340
341 for (i = 0; i < num_events; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
343 }
344
345 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
346 rp, rp_size);
347 kfree(rp);
348
349 return err;
350 }
351
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * HCI_PRIMARY controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on matching controllers, to size the
	 * reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC since hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, additionally skipping controllers
	 * still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length: the second pass may have skipped
	 * entries that were counted in the first pass.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
411
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only HCI_PRIMARY controllers that are still unconfigured.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on matching controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC since hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, additionally skipping controllers
	 * still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped counted entries */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
471
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information. Calling this
 * switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on matching controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC since hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, skipping controllers still in
	 * setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute: the second pass may have skipped counted entries */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
547
548 static bool is_configured(struct hci_dev *hdev)
549 {
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 return false;
553
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 return false;
557
558 return true;
559 }
560
/* Build the bitmask of configuration options that are still required
 * before the controller is considered configured (mirrors the checks in
 * is_configured()). Returned in little-endian wire format.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
575
/* Broadcast the current missing-options bitmask to sockets subscribed
 * to option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
583
/* Complete @opcode on @sk with the current missing-options bitmask as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
591
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id plus
 * the supported and currently missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
619
/* Compute the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its BR/EDR and LE capabilities.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
659
/* Compute the MGMT_SETTING_* bitmask of settings currently active on
 * the controller, based on its flag state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
727
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
732
/* Like pending_find() but also matches on the command's user data. */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
739
740 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
741 {
742 struct mgmt_pending_cmd *cmd;
743
744 /* If there's a pending mgmt command the flags will not yet have
745 * their final values, so check for this first.
746 */
747 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
748 if (cmd) {
749 struct mgmt_mode *cp = cmd->param;
750 if (cp->val == 0x01)
751 return LE_AD_GENERAL;
752 else if (cp->val == 0x02)
753 return LE_AD_LIMITED;
754 } else {
755 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
756 return LE_AD_LIMITED;
757 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
758 return LE_AD_GENERAL;
759 }
760
761 return 0;
762 }
763
764 bool mgmt_get_connectable(struct hci_dev *hdev)
765 {
766 struct mgmt_pending_cmd *cmd;
767
768 /* If there's a pending mgmt command the flag will not yet have
769 * it's final value, so check for this first.
770 */
771 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
772 if (cmd) {
773 struct mgmt_mode *cp = cmd->param;
774
775 return cp->val;
776 }
777
778 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
779 }
780
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set (test-and-clear) */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
801
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is enabled, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
823
/* One-time per-controller mgmt initialisation, performed on first mgmt
 * access. The HCI_MGMT test-and-set guards against repeat runs.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
839
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version, manufacturer, settings bitmasks, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
869
870 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
871 u8 data_len)
872 {
873 eir[eir_len++] = sizeof(type) + data_len;
874 eir[eir_len++] = type;
875 memcpy(&eir[eir_len], data, data_len);
876 eir_len += data_len;
877
878 return eir_len;
879 }
880
/* Append one EIR field carrying a 16-bit little-endian value and return
 * the new length. put_unaligned_le16() is used since the write offset
 * carries no alignment guarantee.
 */
static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
{
	eir[eir_len++] = sizeof(type) + sizeof(data);
	eir[eir_len++] = type;
	put_unaligned_le16(data, &eir[eir_len]);
	eir_len += sizeof(data);

	return eir_len;
}
890
/* Build the EIR payload for the extended info reply/event: class of
 * device (BR/EDR only), appearance (LE only) and both name fields.
 * Returns the total number of bytes written to @eir. The caller is
 * responsible for providing a sufficiently large buffer.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
914
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * the variable-length data encoded as EIR fields. Calling this switches
 * the socket over to extended info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
954
/* Broadcast an extended info changed event (EIR-encoded) to sockets
 * subscribed to extended info events, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
970
/* Complete @opcode on @sk with the current settings bitmask as the
 * reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
978
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, run the power-off work immediately instead of waiting for
 * the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
988
/* Notify trusted sockets that advertising instance @instance was added,
 * skipping the originating socket @sk.
 */
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}
997
/* Notify trusted sockets that advertising instance @instance was
 * removed, skipping the originating socket @sk.
 */
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}
1007
/* Cancel a pending advertising-instance expiry, if one is armed. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1015
/* Prepare the controller for power off: stop scanning, disable
 * advertising, stop discovery and abort all connections in a single
 * HCI request. Returns the hci_req_run() result (-ENODATA when nothing
 * needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Disable page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1049
/* MGMT_OP_SET_POWERED handler: power the controller on or off. The
 * actual work runs asynchronously; the pending command is completed
 * once the power state change finishes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid mode values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power state change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1104
1105 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1106 {
1107 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1108
1109 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1110 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1111 }
1112
/* Broadcast a New Settings event to all subscribed mgmt sockets.
 * Public entry point used by other HCI core code.
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1117
/* Context handed to mgmt_pending_foreach() callbacks.  sk collects the
 * first responder's socket (with a held reference) so the caller can
 * skip it when broadcasting afterwards; mgmt_status carries a status
 * code for callbacks that need one.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1123
/* mgmt_pending_foreach() callback: reply to one pending command with
 * the current settings, remember the first socket seen (taking a
 * reference so the caller can skip it in a later broadcast), and free
 * the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink explicitly since mgmt_pending_free() is used instead of
	 * mgmt_pending_remove() — NOTE(review): the former presumably
	 * does not unlink; confirm against mgmt_util.c.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1139
1140 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1141 {
1142 u8 *status = data;
1143
1144 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1145 mgmt_pending_remove(cmd);
1146 }
1147
1148 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1149 {
1150 if (cmd->cmd_complete) {
1151 u8 *status = data;
1152
1153 cmd->cmd_complete(cmd, *status);
1154 mgmt_pending_remove(cmd);
1155
1156 return;
1157 }
1158
1159 cmd_status_rsp(cmd, data);
1160 }
1161
/* Default cmd_complete handler: echo the command's stored parameters
 * back in full as the Command Complete payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1167
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1173
1174 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1175 {
1176 if (!lmp_bredr_capable(hdev))
1177 return MGMT_STATUS_NOT_SUPPORTED;
1178 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1179 return MGMT_STATUS_REJECTED;
1180 else
1181 return MGMT_STATUS_SUCCESS;
1182 }
1183
1184 static u8 mgmt_le_support(struct hci_dev *hdev)
1185 {
1186 if (!lmp_le_capable(hdev))
1187 return MGMT_STATUS_NOT_SUPPORTED;
1188 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1189 return MGMT_STATUS_REJECTED;
1190 else
1191 return MGMT_STATUS_SUCCESS;
1192 }
1193
/* Completion handler for the Set Discoverable HCI sequence.  On
 * failure the limited-discoverable flag is rolled back; on success the
 * discoverable timeout (if any) is armed and New Settings broadcast.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout only now that discoverable mode is actually
	 * in effect (set_discoverable() deliberately deferred this).
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1228
/* Handle the MGMT Set Discoverable command.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable.  Limited mode requires a timeout; disabling forbids
 * one.  When powered off only the setting flag is toggled.  When
 * powered, the flags are updated here and the discoverable_update work
 * item performs the controller changes; the timeout is armed in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share controller state;
	 * only one of the two commands may be in flight.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1352
/* Completion handler for the Set Connectable HCI sequence: report the
 * outcome to the pending command and broadcast New Settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1380
1381 static int set_connectable_update_settings(struct hci_dev *hdev,
1382 struct sock *sk, u8 val)
1383 {
1384 bool changed = false;
1385 int err;
1386
1387 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1388 changed = true;
1389
1390 if (val) {
1391 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1392 } else {
1393 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1394 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1395 }
1396
1397 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1398 if (err < 0)
1399 return err;
1400
1401 if (changed) {
1402 hci_req_update_scan(hdev);
1403 hci_update_background_scan(hdev);
1404 return new_settings(hdev, sk);
1405 }
1406
1407 return 0;
1408 }
1409
/* Handle the MGMT Set Connectable command.
 *
 * When powered off only the setting flags change (via
 * set_connectable_update_settings()).  When powered, flags are updated
 * here and the connectable_update work item performs the HCI changes.
 * Disabling connectable also tears down discoverable state.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize with in-flight discoverable/connectable changes. */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability. */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1466
/* Handle the MGMT Set Bondable command: toggle HCI_BONDABLE.  No HCI
 * traffic is normally needed; in limited privacy mode a change may
 * require refreshing the advertising address via discoverable_update.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1509
/* Handle the MGMT Set Link Security (authentication) command.
 *
 * Powered off: toggle the HCI_LINK_SECURITY flag only.  Powered on:
 * send HCI_OP_WRITE_AUTH_ENABLE unless the controller already matches
 * the requested state; completion is reported via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: no HCI traffic. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1578
/* Handle the MGMT Set SSP (Secure Simple Pairing) command.
 *
 * Powered off: only the HCI_SSP_ENABLED flag changes (disabling SSP
 * also clears High Speed, which depends on it).  Powered on:
 * HCI_OP_WRITE_SSP_MODE is sent and completion is reported via the
 * pending-cmd mechanism.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Report "changed" if either SSP or the
			 * dependent HS flag was modified.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply without HCI traffic. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Best effort (return value ignored): turning SSP off also
	 * disables SSP debug mode when debug keys are in use.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1659
/* Handle the MGMT Set High Speed command: toggle HCI_HS_ENABLED.
 * HS requires SSP; disabling HS is only allowed while powered off.
 * Only the setting flag changes — no HCI traffic is generated.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed depends on SSP being enabled. */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Serialize against a pending Set SSP command. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is rejected. */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1716
/* HCI request callback for the Write LE Host Supported sequence
 * started by set_le().  Answers all pending Set LE commands (failure
 * or current-settings response) and broadcasts New Settings, skipping
 * the first responder's socket collected by settings_rsp().
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp(). */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1756
/* Handle the MGMT Set LE command.
 *
 * LE-only configurations (BR/EDR disabled) cannot switch LE off.
 * When powered off, or when the host LE support already matches the
 * request, only the flags change; otherwise
 * HCI_OP_WRITE_LE_HOST_SUPPORTED is sent and le_enable_complete()
 * finishes the command.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE also removes all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* LE off implies advertising off as well. */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before disabling host LE support. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1860
1861 /* This is a helper function to test for pending mgmt commands that can
1862 * cause CoD or EIR HCI commands. We can only allow one such pending
1863 * mgmt command at a time since otherwise we cannot easily track what
1864 * the current values are, will be, and based on that calculate if a new
1865 * HCI command needs to be sent and if yes with what value.
1866 */
1867 static bool pending_eir_or_class(struct hci_dev *hdev)
1868 {
1869 struct mgmt_pending_cmd *cmd;
1870
1871 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1872 switch (cmd->opcode) {
1873 case MGMT_OP_ADD_UUID:
1874 case MGMT_OP_REMOVE_UUID:
1875 case MGMT_OP_SET_DEV_CLASS:
1876 case MGMT_OP_SET_POWERED:
1877 return true;
1878 }
1879 }
1880
1881 return false;
1882 }
1883
/* Bluetooth Base UUID in little-endian byte order; 16-bit and 32-bit
 * UUIDs are aliases of this base differing only in bytes 12-15 (see
 * get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1888
1889 static u8 get_uuid_size(const u8 *uuid)
1890 {
1891 u32 val;
1892
1893 if (memcmp(uuid, bluetooth_base_uuid, 12))
1894 return 128;
1895
1896 val = get_unaligned_le32(&uuid[12]);
1897 if (val > 0xffff)
1898 return 32;
1899
1900 return 16;
1901 }
1902
/* Common completion for UUID/device-class commands: answer the pending
 * mgmt command (if still present) with the current Class of Device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	/* dev_class is always 3 bytes. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1921
/* HCI request callback for Add UUID: forward the status to the common
 * class/EIR completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1928
/* Handle the MGMT Add UUID command: store the service UUID and refresh
 * the Class of Device and EIR data on the controller.  If no HCI
 * command ends up queued (-ENODATA), the command completes immediately
 * with the current class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing had to change on the wire. */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1986
1987 static bool enable_service_cache(struct hci_dev *hdev)
1988 {
1989 if (!hdev_is_powered(hdev))
1990 return false;
1991
1992 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1993 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1994 CACHE_TIMEOUT);
1995 return true;
1996 }
1997
1998 return false;
1999 }
2000
/* HCI request callback for Remove UUID: forward the status to the
 * common class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2007
/* Handle the MGMT Remove UUID command.  An all-zero UUID acts as a
 * wildcard that clears every stored UUID (re-arming the service cache
 * when possible); otherwise all entries matching the given UUID are
 * deleted.  Class of Device and EIR are then refreshed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: drop every stored UUID. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was newly armed, defer the
		 * controller update and complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing had to change on the wire. */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2086
/* HCI request callback for Set Device Class: forward the status to the
 * common class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2093
/* Handle the MGMT Set Device Class command: record the new major/minor
 * class and, when powered, push the updated Class of Device (and
 * possibly EIR) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel.
		 * NOTE(review): presumably the service-cache work takes
		 * hdev's lock itself, so waiting for it while holding
		 * the lock could deadlock — confirm.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing had to change on the wire. */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2164
/* Handle the MGMT Load Link Keys command: validate the supplied list,
 * replace all stored BR/EDR link keys with it and update the
 * keep-debug-keys policy.  Debug combination keys are never stored.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping expected_len below within u16 range. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2246
2247 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2248 u8 addr_type, struct sock *skip_sk)
2249 {
2250 struct mgmt_ev_device_unpaired ev;
2251
2252 bacpy(&ev.addr.bdaddr, bdaddr);
2253 ev.addr.type = addr_type;
2254
2255 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2256 skip_sk);
2257 }
2258
/* Handle the MGMT Unpair Device command.
 *
 * Removes all stored pairing material for the given remote address
 * (link key for BR/EDR; IRK and LTKs for LE) and, when cp->disconnect
 * is set, also terminates an existing connection. The reply echoes the
 * address; a Device Unpaired event is emitted on success. When a link
 * must be torn down, the reply is deferred via a pending command and
 * sent from its completion callback.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always carries the address that was operated on */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag; anything else is malformed */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* No stored link key means the device was never paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* An IRK may or may not be present; only the LTK removal below
	 * decides whether the device counts as paired.
	 */
	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored connection parameters
		 * immediately and finish up.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* A link must be torn down: keep the command pending so the
	 * reply can be sent once the disconnect completes.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2389
/* Handle the MGMT Disconnect command.
 *
 * Looks up the BR/EDR or LE connection for the given address and
 * initiates disconnection. The reply is deferred via a pending command
 * and delivered when the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always carries the address from the request */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight per controller */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		/* HCI command never left; undo the pending entry */
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2455
2456 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2457 {
2458 switch (link_type) {
2459 case LE_LINK:
2460 switch (addr_type) {
2461 case ADDR_LE_DEV_PUBLIC:
2462 return BDADDR_LE_PUBLIC;
2463
2464 default:
2465 /* Fallback to LE Random address type */
2466 return BDADDR_LE_RANDOM;
2467 }
2468
2469 default:
2470 /* Fallback to BR/EDR type */
2471 return BDADDR_BREDR;
2472 }
2473 }
2474
2475 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2476 u16 data_len)
2477 {
2478 struct mgmt_rp_get_connections *rp;
2479 struct hci_conn *c;
2480 size_t rp_len;
2481 int err;
2482 u16 i;
2483
2484 BT_DBG("");
2485
2486 hci_dev_lock(hdev);
2487
2488 if (!hdev_is_powered(hdev)) {
2489 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2490 MGMT_STATUS_NOT_POWERED);
2491 goto unlock;
2492 }
2493
2494 i = 0;
2495 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2496 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2497 i++;
2498 }
2499
2500 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2501 rp = kmalloc(rp_len, GFP_KERNEL);
2502 if (!rp) {
2503 err = -ENOMEM;
2504 goto unlock;
2505 }
2506
2507 i = 0;
2508 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2509 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2510 continue;
2511 bacpy(&rp->addr[i].bdaddr, &c->dst);
2512 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2513 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2514 continue;
2515 i++;
2516 }
2517
2518 rp->conn_count = cpu_to_le16(i);
2519
2520 /* Recalculate length in case of filtered SCO connections, etc */
2521 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2522
2523 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2524 rp_len);
2525
2526 kfree(rp);
2527
2528 unlock:
2529 hci_dev_unlock(hdev);
2530 return err;
2531 }
2532
2533 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2534 struct mgmt_cp_pin_code_neg_reply *cp)
2535 {
2536 struct mgmt_pending_cmd *cmd;
2537 int err;
2538
2539 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2540 sizeof(*cp));
2541 if (!cmd)
2542 return -ENOMEM;
2543
2544 cmd->cmd_complete = addr_cmd_complete;
2545
2546 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2547 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2548 if (err < 0)
2549 mgmt_pending_remove(cmd);
2550
2551 return err;
2552 }
2553
/* Handle the MGMT PIN Code Reply command.
 *
 * Forwards the user-supplied PIN to the controller. If the connection
 * demands high security (16 digit PIN) and a shorter one was supplied,
 * a negative reply is sent to the controller instead and the command
 * fails with Invalid Parameters.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only applies to BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI level first, then report
		 * the invalid parameters back to userspace.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Build the HCI PIN Code Reply from the mgmt request */
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2615
2616 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2617 u16 len)
2618 {
2619 struct mgmt_cp_set_io_capability *cp = data;
2620
2621 BT_DBG("");
2622
2623 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2625 MGMT_STATUS_INVALID_PARAMS);
2626
2627 hci_dev_lock(hdev);
2628
2629 hdev->io_capability = cp->io_capability;
2630
2631 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2632 hdev->io_capability);
2633
2634 hci_dev_unlock(hdev);
2635
2636 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2637 NULL, 0);
2638 }
2639
2640 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2641 {
2642 struct hci_dev *hdev = conn->hdev;
2643 struct mgmt_pending_cmd *cmd;
2644
2645 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2646 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2647 continue;
2648
2649 if (cmd->user_data != conn)
2650 continue;
2651
2652 return cmd;
2653 }
2654
2655 return NULL;
2656 }
2657
/* Finish a pending Pair Device command with the given mgmt status.
 *
 * Sends the command reply, detaches the pairing callbacks from the
 * connection and releases the references taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* NOTE(review): this drop presumably balances the hold taken
	 * when the connection was initiated for pairing — confirm
	 * against hci_connect_acl()/hci_connect_le_scan().
	 */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2686
2687 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2688 {
2689 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2690 struct mgmt_pending_cmd *cmd;
2691
2692 cmd = find_pairing(conn);
2693 if (cmd) {
2694 cmd->cmd_complete(cmd, status);
2695 mgmt_pending_remove(cmd);
2696 }
2697 }
2698
2699 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2700 {
2701 struct mgmt_pending_cmd *cmd;
2702
2703 BT_DBG("status %u", status);
2704
2705 cmd = find_pairing(conn);
2706 if (!cmd) {
2707 BT_DBG("Unable to find a pending command");
2708 return;
2709 }
2710
2711 cmd->cmd_complete(cmd, mgmt_status(status));
2712 mgmt_pending_remove(cmd);
2713 }
2714
2715 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2716 {
2717 struct mgmt_pending_cmd *cmd;
2718
2719 BT_DBG("status %u", status);
2720
2721 if (!status)
2722 return;
2723
2724 cmd = find_pairing(conn);
2725 if (!cmd) {
2726 BT_DBG("Unable to find a pending command");
2727 return;
2728 }
2729
2730 cmd->cmd_complete(cmd, mgmt_status(status));
2731 mgmt_pending_remove(cmd);
2732 }
2733
2734 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2735 u16 len)
2736 {
2737 struct mgmt_cp_pair_device *cp = data;
2738 struct mgmt_rp_pair_device rp;
2739 struct mgmt_pending_cmd *cmd;
2740 u8 sec_level, auth_type;
2741 struct hci_conn *conn;
2742 int err;
2743
2744 BT_DBG("");
2745
2746 memset(&rp, 0, sizeof(rp));
2747 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2748 rp.addr.type = cp->addr.type;
2749
2750 if (!bdaddr_type_is_valid(cp->addr.type))
2751 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2752 MGMT_STATUS_INVALID_PARAMS,
2753 &rp, sizeof(rp));
2754
2755 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2756 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2757 MGMT_STATUS_INVALID_PARAMS,
2758 &rp, sizeof(rp));
2759
2760 hci_dev_lock(hdev);
2761
2762 if (!hdev_is_powered(hdev)) {
2763 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2764 MGMT_STATUS_NOT_POWERED, &rp,
2765 sizeof(rp));
2766 goto unlock;
2767 }
2768
2769 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2770 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2771 MGMT_STATUS_ALREADY_PAIRED, &rp,
2772 sizeof(rp));
2773 goto unlock;
2774 }
2775
2776 sec_level = BT_SECURITY_MEDIUM;
2777 auth_type = HCI_AT_DEDICATED_BONDING;
2778
2779 if (cp->addr.type == BDADDR_BREDR) {
2780 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2781 auth_type);
2782 } else {
2783 u8 addr_type = le_addr_type(cp->addr.type);
2784 struct hci_conn_params *p;
2785
2786 /* When pairing a new device, it is expected to remember
2787 * this device for future connections. Adding the connection
2788 * parameter information ahead of time allows tracking
2789 * of the slave preferred values and will speed up any
2790 * further connection establishment.
2791 *
2792 * If connection parameters already exist, then they
2793 * will be kept and this function does nothing.
2794 */
2795 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2796
2797 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2798 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2799
2800 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2801 addr_type, sec_level,
2802 HCI_LE_CONN_TIMEOUT);
2803 }
2804
2805 if (IS_ERR(conn)) {
2806 int status;
2807
2808 if (PTR_ERR(conn) == -EBUSY)
2809 status = MGMT_STATUS_BUSY;
2810 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2811 status = MGMT_STATUS_NOT_SUPPORTED;
2812 else if (PTR_ERR(conn) == -ECONNREFUSED)
2813 status = MGMT_STATUS_REJECTED;
2814 else
2815 status = MGMT_STATUS_CONNECT_FAILED;
2816
2817 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2818 status, &rp, sizeof(rp));
2819 goto unlock;
2820 }
2821
2822 if (conn->connect_cfm_cb) {
2823 hci_conn_drop(conn);
2824 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2825 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2826 goto unlock;
2827 }
2828
2829 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2830 if (!cmd) {
2831 err = -ENOMEM;
2832 hci_conn_drop(conn);
2833 goto unlock;
2834 }
2835
2836 cmd->cmd_complete = pairing_complete;
2837
2838 /* For LE, just connecting isn't a proof that the pairing finished */
2839 if (cp->addr.type == BDADDR_BREDR) {
2840 conn->connect_cfm_cb = pairing_complete_cb;
2841 conn->security_cfm_cb = pairing_complete_cb;
2842 conn->disconn_cfm_cb = pairing_complete_cb;
2843 } else {
2844 conn->connect_cfm_cb = le_pairing_complete_cb;
2845 conn->security_cfm_cb = le_pairing_complete_cb;
2846 conn->disconn_cfm_cb = le_pairing_complete_cb;
2847 }
2848
2849 conn->io_capability = cp->io_cap;
2850 cmd->user_data = hci_conn_get(conn);
2851
2852 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2853 hci_conn_security(conn, sec_level, auth_type, true)) {
2854 cmd->cmd_complete(cmd, 0);
2855 mgmt_pending_remove(cmd);
2856 }
2857
2858 err = 0;
2859
2860 unlock:
2861 hci_dev_unlock(hdev);
2862 return err;
2863 }
2864
/* Handle the MGMT Cancel Pair Device command.
 *
 * Looks up the pending Pair Device command, verifies it targets the
 * given address and completes it with status Cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the connection the pairing was started on
	 * (set in pair_device() via hci_conn_get()).
	 */
	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the original Pair Device command as cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2907
/* Common helper for all user pairing response commands (PIN negative
 * reply, user confirm reply/neg reply, passkey reply/neg reply).
 *
 * For LE connections the response is handed to SMP directly and the
 * mgmt reply is sent immediately. For BR/EDR, the HCI command @hci_op
 * is issued and the mgmt reply is deferred via a pending command.
 * @passkey is only meaningful when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: the response is consumed by SMP; reply right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	/* BR/EDR: keep the command pending across the HCI round trip */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2978
2979 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2980 void *data, u16 len)
2981 {
2982 struct mgmt_cp_pin_code_neg_reply *cp = data;
2983
2984 BT_DBG("");
2985
2986 return user_pairing_resp(sk, hdev, &cp->addr,
2987 MGMT_OP_PIN_CODE_NEG_REPLY,
2988 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2989 }
2990
2991 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2992 u16 len)
2993 {
2994 struct mgmt_cp_user_confirm_reply *cp = data;
2995
2996 BT_DBG("");
2997
2998 if (len != sizeof(*cp))
2999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3000 MGMT_STATUS_INVALID_PARAMS);
3001
3002 return user_pairing_resp(sk, hdev, &cp->addr,
3003 MGMT_OP_USER_CONFIRM_REPLY,
3004 HCI_OP_USER_CONFIRM_REPLY, 0);
3005 }
3006
3007 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3008 void *data, u16 len)
3009 {
3010 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3011
3012 BT_DBG("");
3013
3014 return user_pairing_resp(sk, hdev, &cp->addr,
3015 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3016 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3017 }
3018
3019 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3020 u16 len)
3021 {
3022 struct mgmt_cp_user_passkey_reply *cp = data;
3023
3024 BT_DBG("");
3025
3026 return user_pairing_resp(sk, hdev, &cp->addr,
3027 MGMT_OP_USER_PASSKEY_REPLY,
3028 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3029 }
3030
3031 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3032 void *data, u16 len)
3033 {
3034 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3035
3036 BT_DBG("");
3037
3038 return user_pairing_resp(sk, hdev, &cp->addr,
3039 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3040 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3041 }
3042
/* Expire the current advertising instance if it carries any of the
 * data fields in @flags (e.g. local name or appearance) that have just
 * changed, so the advertised data gets regenerated.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Move on to the next instance to be advertised */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	/* Fire-and-forget: no completion callback is needed here */
	hci_req_run(&req, NULL);
}
3071
/* Completion callback for the HCI request started by set_local_name().
 *
 * Sends the deferred Set Local Name reply and, on success, expires
 * advertising instances that include the local name so they are
 * regenerated with the new value.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	/* The original request was stored with the pending command */
	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3103
/* Handle the MGMT Set Local Name command.
 *
 * Updates the device name and short name. When powered, the new name
 * is pushed to the controller (and EIR / scan response data refreshed)
 * via an HCI request, with the reply sent from set_name_complete();
 * otherwise the names are stored and the reply is sent immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is stored unconditionally; unlike dev_name it
	 * is not part of the HCI update issued below.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3173
3174 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3175 u16 len)
3176 {
3177 struct mgmt_cp_set_appearance *cp = data;
3178 u16 apperance;
3179 int err;
3180
3181 BT_DBG("");
3182
3183 if (!lmp_le_capable(hdev))
3184 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3185 MGMT_STATUS_NOT_SUPPORTED);
3186
3187 apperance = le16_to_cpu(cp->appearance);
3188
3189 hci_dev_lock(hdev);
3190
3191 if (hdev->appearance != apperance) {
3192 hdev->appearance = apperance;
3193
3194 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3195 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3196
3197 ext_info_changed(hdev, sk);
3198 }
3199
3200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3201 0);
3202
3203 hci_dev_unlock(hdev);
3204
3205 return err;
3206 }
3207
/* Completion callback for the Read Local OOB Data HCI request.
 *
 * Converts the controller reply — either the legacy or the extended
 * (Secure Connections) variant, selected by @opcode — into a mgmt
 * Read Local OOB Data reply for the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply has no P-256 values; trim them off */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3266
/* Handle the MGMT Read Local OOB Data command.
 *
 * Issues the legacy or extended (Secure Connections) HCI Read Local
 * OOB Data command; the mgmt reply is produced by
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data generation requires SSP support in the controller */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one request may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Pick the extended variant when Secure Connections is enabled */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3317
/* Handle the MGMT Add Remote OOB Data command.
 *
 * Two request sizes are accepted: the legacy form carrying only the
 * P-192 hash/randomizer (BR/EDR only), and the extended form carrying
 * both P-192 and P-256 values. All-zero value pairs are stored as
 * "no data" (NULL).
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is only defined for BR/EDR addresses */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3424
/* Handle the Remove Remote OOB Data management command: drop stored
 * out-of-band pairing data for one remote device, or for all devices
 * when the wildcard address BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	/* Remote OOB data is only kept for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Wildcard address clears the whole OOB data list */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3461
/* Called once the HCI work that starts discovery has finished.
 * Completes and removes whichever start-discovery variant is pending.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Only one of the three start-discovery opcodes can be pending
	 * at a time, so probe them in turn.
	 */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
3484
3485 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3486 uint8_t *mgmt_status)
3487 {
3488 switch (type) {
3489 case DISCOV_TYPE_LE:
3490 *mgmt_status = mgmt_le_support(hdev);
3491 if (*mgmt_status)
3492 return false;
3493 break;
3494 case DISCOV_TYPE_INTERLEAVED:
3495 *mgmt_status = mgmt_le_support(hdev);
3496 if (*mgmt_status)
3497 return false;
3498 /* Intentional fall-through */
3499 case DISCOV_TYPE_BREDR:
3500 *mgmt_status = mgmt_bredr_support(hdev);
3501 if (*mgmt_status)
3502 return false;
3503 break;
3504 default:
3505 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3506 return false;
3507 }
3508
3509 return true;
3510 }
3511
/* Common handler for Start Discovery, Start Service Discovery and
 * Start Limited Discovery.  Validates power and discovery state,
 * records the requested parameters and queues the deferred discovery
 * work; the final response is sent from mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy while another discovery or a periodic inquiry is running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	/* Limited discovery restricts results to limited-discoverable
	 * devices; plain and service discovery do not.
	 */
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Defer the actual HCI work to the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3572
/* Handle the Start Discovery management command. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3579
/* Handle the Start Limited Discovery management command. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3587
/* Completion handler for Start Service Discovery: the reply carries
 * only the first byte of the stored parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3594
/* Handle the Start Service Discovery management command: like Start
 * Discovery but with result filtering by RSSI threshold and a
 * caller-supplied list of 128-bit service UUIDs.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy while another discovery or a periodic inquiry is running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Take a private copy of the UUID list; freed via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Defer the actual HCI work to the request workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3695
/* Called once the HCI work that stops discovery has finished.
 * Completes and removes the pending Stop Discovery command, if any.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
3712
/* Handle the Stop Discovery management command.  The supplied type
 * must match the type of the currently running discovery; the actual
 * stop is deferred to the request workqueue and the final response is
 * sent from mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match what discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3754
/* Handle the Confirm Name management command: during discovery, user
 * space tells us whether a device's name is already known so the
 * inquiry cache can skip or schedule remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3796
/* Handle the Block Device management command: add the address to the
 * blacklist and emit a Device Blocked event to other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	/* Notify other mgmt sockets (skipping the sender) */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3832
/* Handle the Unblock Device management command: remove the address
 * from the blacklist and emit a Device Unblocked event.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* Address was not on the list */
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Notify other mgmt sockets (skipping the sender) */
	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
3868
/* Handle the Set Device ID management command: store the Device ID
 * record (source, vendor, product, version) and refresh the EIR data
 * so the new record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Only sources 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated EIR data to the controller */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
3903
/* Request completion callback used when re-enabling instance
 * advertising; only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
3909
/* Callback for the HCI request issued by set_advertising().  Syncs the
 * HCI_ADVERTISING flag with the controller state, responds to all
 * pending Set Advertising commands and, when the setting was just
 * disabled, re-arms any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual LE advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
3972
/* Handle the Set Advertising management command.  Value 0x00 disables
 * advertising, 0x01 enables it, and 0x02 enables it in connectable
 * mode.  When no HCI traffic is needed (device off, no state change,
 * or advertising would conflict with LE connections / active
 * scanning), only the flags are toggled and a response is sent
 * directly; otherwise an HCI request is issued and completion is
 * handled in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Busy while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4075
/* Handle the Set Static Address management command.  Only allowed
 * while the controller is powered off; a non-wildcard address must be
 * a valid static random address (two most significant bits set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; anything else must be a
	 * well-formed static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4119
/* Handle the Set Scan Parameters management command: store new LE
 * scan interval and window (both in the range 0x0004-0x4000, window
 * not larger than interval) and restart background scanning so the
 * new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may not exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4176
/* Callback for the HCI request issued by set_fast_connectable().
 * Updates the HCI_FAST_CONNECTABLE flag on success and responds to the
 * pending command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4210
/* Handle the Set Fast Connectable management command.  Requires a
 * BR/EDR-enabled controller of at least Bluetooth 1.2.  When powered
 * on, the page scan parameters are rewritten via an HCI request and
 * the response is deferred to fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op when the flag already matches the requested value */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* When powered off only the flag is toggled; the controller is
	 * configured at power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4275
/* Callback for the HCI request issued by set_bredr().  On failure the
 * HCI_BREDR_ENABLED flag (optimistically set by set_bredr()) is rolled
 * back; on success the new settings are reported.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4307
/* Handle the Set BR/EDR management command on a dual-mode controller.
 * Disabling BR/EDR while powered is rejected, as is re-enabling it
 * when a static address or secure connections would make the
 * configuration invalid.  When powered, the switch is carried out via
 * an HCI request completed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every setting that only
		 * makes sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4419
/* Callback for the HCI request issued by set_secure_conn().  Updates
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags to match the requested mode and
 * responds to the pending Set Secure Connections command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4464
/* Handle the Set Secure Connections management command (0x00 = off,
 * 0x01 = on, 0x02 = SC-only).  When the controller cannot act on the
 * change right away (powered off, no BR/EDR SC support, or BR/EDR
 * disabled), only the flags are toggled; otherwise Write Secure
 * Connections Host Support is sent and completion is handled in
 * sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR, secure connections requires SSP to be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: nothing to tell the controller right now */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag: respond without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4552
/* Handle the Set Debug Keys management command (0x00 = discard debug
 * keys, 0x01 = keep them, 0x02 = keep them and also generate debug
 * keys for new pairings).  Toggles HCI_KEEP_DEBUG_KEYS and
 * HCI_USE_DEBUG_KEYS and pushes the SSP debug mode to the controller
 * when needed.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only mode 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the SSP debug mode with the controller when it changed */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4599
/* Handle the Set Privacy management command (0x00 = off, 0x01 = on,
 * 0x02 = limited privacy).  Only allowed while powered off; stores or
 * clears the local IRK and the privacy-related flags.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4654
4655 static bool irk_is_valid(struct mgmt_irk_info *irk)
4656 {
4657 switch (irk->addr.type) {
4658 case BDADDR_LE_PUBLIC:
4659 return true;
4660
4661 case BDADDR_LE_RANDOM:
4662 /* Two most significant bits shall be set */
4663 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4664 return false;
4665 return true;
4666 }
4667
4668 return false;
4669 }
4670
/* Load IRKs management command handler.
 *
 * Replaces the complete set of stored Identity Resolving Keys with the
 * list supplied by user space. The list is fully validated before any
 * existing key is discarded, so an invalid entry leaves the current
 * state untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX, ruling out
	 * overflow in the length check below.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received payload */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* First pass: validate every entry before committing anything */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Second pass: drop the old set and store the new one */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it can handle RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4732
4733 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4734 {
4735 if (key->master != 0x00 && key->master != 0x01)
4736 return false;
4737
4738 switch (key->addr.type) {
4739 case BDADDR_LE_PUBLIC:
4740 return true;
4741
4742 case BDADDR_LE_RANDOM:
4743 /* Two most significant bits shall be set */
4744 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4745 return false;
4746 return true;
4747 }
4748
4749 return false;
4750 }
4751
/* Load Long Term Keys management command handler.
 *
 * Replaces the complete set of stored LTKs with the list supplied by
 * user space. Entries are validated up front; unknown key types are
 * silently skipped during the store pass.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Bound that keeps expected_len below U16_MAX (no overflow) */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received payload */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before discarding the current key set */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Map the mgmt key type onto SMP type/authenticated */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not persisted:
			 * fall through to the default case and skip
			 * this entry without calling hci_add_ltk().
			 */
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4838
/* Complete a pending Get Connection Information command.
 *
 * cmd->user_data holds a referenced hci_conn (taken in get_conn_info());
 * both the hci_conn_hold() and the hci_conn_get() references are
 * released here regardless of status.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original request parameters start with the address block */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* On failure report explicitly-invalid marker values */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Drop the references taken when the command was queued */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
4865
/* HCI request completion callback for the conn-info refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * command and finishes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		/* Read RSSI already completed; hci_status refers to a
		 * TX power read, which is allowed to fail.
		 */
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matched: nothing we can correlate to */
	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() sends the reply and drops conn refs */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4918
/* Get Connection Information management command handler.
 *
 * Replies immediately from cached RSSI/TX-power values when they are
 * still fresh; otherwise issues Read RSSI (and, if needed, Read TX
 * Power) HCI commands and defers the reply to
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply address so every exit path can reuse it */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Pick the link type matching the requested address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding Get Conn Info per connection is allowed */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped later in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5039
/* Complete a pending Get Clock Information command.
 *
 * cmd->user_data may hold a referenced hci_conn (only when a piconet
 * clock was requested); its references are released here. On error the
 * reply carries zeroed clock fields.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The original request parameters start with the address block */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the references taken when the command was queued */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5075
/* HCI request completion callback for the Read Clock request issued by
 * get_clock_info(). Matches the response back to the pending mgmt
 * command via the connection (or NULL for the local clock).
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which == 0x01 means piconet clock, i.e. a specific connection;
	 * otherwise the local clock was read and no conn is involved.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* clock_info_cmd_complete() sends the reply and drops conn refs */
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5107
/* Get Clock Information management command handler.
 *
 * Always reads the local clock; when a non-ANY BR/EDR address is given
 * and connected, additionally reads that connection's piconet clock.
 * The reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply address so error paths can reuse it */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00 from memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped later in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5183
5184 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5185 {
5186 struct hci_conn *conn;
5187
5188 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5189 if (!conn)
5190 return false;
5191
5192 if (conn->dst_type != type)
5193 return false;
5194
5195 if (conn->state != BT_CONNECTED)
5196 return false;
5197
5198 return true;
5199 }
5200
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * connection-parameter entry and move it onto the matching action list
 * (pend_le_conns or pend_le_reports). Returns 0 on success, -EIO if
 * the entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-adding it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5245
5246 static void device_added(struct sock *sk, struct hci_dev *hdev,
5247 bdaddr_t *bdaddr, u8 type, u8 action)
5248 {
5249 struct mgmt_ev_device_added ev;
5250
5251 bacpy(&ev.addr.bdaddr, bdaddr);
5252 ev.addr.type = type;
5253 ev.action = action;
5254
5255 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5256 }
5257
/* Add Device management command handler.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections,
 * i.e. whitelist) is supported. For LE addresses the action maps to an
 * auto-connect policy: 0x00 = background report, 0x01 = direct
 * connect, 0x02 = always connect.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changed: page scan settings may need updating */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Translate the mgmt action into the internal policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	/* Notify other mgmt sockets and acknowledge the requester */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5345
5346 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5347 bdaddr_t *bdaddr, u8 type)
5348 {
5349 struct mgmt_ev_device_removed ev;
5350
5351 bacpy(&ev.addr.bdaddr, bdaddr);
5352 ev.addr.type = type;
5353
5354 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5355 }
5356
/* Remove Device management command handler.
 *
 * A specific address removes one whitelist entry (BR/EDR) or one
 * LE conn_params entry. BDADDR_ANY with address type 0 performs a bulk
 * clear of the whitelist and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Address was not on the whitelist */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Whitelist changed: update page scan settings */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not added via Add Device,
		 * so refuse to remove them through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: bulk removal of all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Only address type 0 is valid for the wildcard address */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect,
			 * downgrading them so the connect can finish.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5485
/* Load Connection Parameters management command handler.
 *
 * Clears previously-disabled conn_params and stores the supplied LE
 * connection parameter entries. Individual invalid entries are logged
 * and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Bound that keeps expected_len below U16_MAX (no overflow) */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received payload */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE identity address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Reject ranges outside the spec-defined limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5571
/* Set External Configuration management command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. If the change flips the controller
 * between the configured and unconfigured state, the mgmt index is
 * removed and re-announced accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Reconfiguration is only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flag change may have moved the controller across the
	 * configured/unconfigured boundary; re-register the mgmt index
	 * under its new identity if so.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5627
/* Set Public Address management command handler.
 *
 * Stores a public address for controllers that provide a set_bdaddr
 * driver callback. If this completes the controller's configuration,
 * the unconfigured mgmt index is removed and the controller is powered
 * up into the config stage.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address was the last missing piece of configuration,
	 * transition the controller out of the unconfigured state.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5679
/* HCI request completion callback for the local OOB data read issued
 * by read_local_ssp_oob_req(). Builds the EIR-formatted reply, sends
 * it to the requester and, on success, also broadcasts a Local OOB
 * Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	/* Pick out the hash/randomizer pointers depending on which HCI
	 * command produced this response. When status ends up non-zero
	 * the pointers are never dereferenced (see the goto send_rsp).
	 */
	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class(5) + hash192(18) + rand192(18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode the P-192 values are omitted */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty EIR payload */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Broadcast to other sockets interested in OOB data updates */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
5790
5791 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5792 struct mgmt_cp_read_local_oob_ext_data *cp)
5793 {
5794 struct mgmt_pending_cmd *cmd;
5795 struct hci_request req;
5796 int err;
5797
5798 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5799 cp, sizeof(*cp));
5800 if (!cmd)
5801 return -ENOMEM;
5802
5803 hci_req_init(&req, hdev);
5804
5805 if (bredr_sc_enabled(hdev))
5806 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5807 else
5808 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5809
5810 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
5811 if (err < 0) {
5812 mgmt_pending_remove(cmd);
5813 return err;
5814 }
5815
5816 return 0;
5817 }
5818
/* Command handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds an EIR-formatted blob of local out-of-band pairing data for the
 * requested transport (cp->type). The BR/EDR path with SSP enabled defers
 * to an asynchronous HCI request (read_local_ssp_oob_req); all other paths
 * reply synchronously. On success the reply is also broadcast as a
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED event to sockets that opted in.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: decide the status and an upper bound for the EIR
	 * payload so the reply buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* 5 bytes: Class of Device EIR field. */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* LE address + role + SC confirm + SC random
				 * + flags fields (sizes match the
				 * eir_append_data calls below).
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually fill in the EIR data. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must be fetched from the controller;
			 * the reply is sent from the request callback, so
			 * free the synchronous buffer and bail out.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (addr[6] = 0x01) vs public (0x00) address,
		 * mirroring how the controller is actually addressed.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred
		 * -- NOTE(review): inferred from the HCI_ADVERTISING
		 * condition; confirm against the OOB role definitions.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt this socket in to future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the same payload to other interested sockets
	 * (skipping the requester via the last argument).
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
5974
5975 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5976 {
5977 u32 flags = 0;
5978
5979 flags |= MGMT_ADV_FLAG_CONNECTABLE;
5980 flags |= MGMT_ADV_FLAG_DISCOV;
5981 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5982 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5983 flags |= MGMT_ADV_FLAG_APPEARANCE;
5984 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
5985
5986 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5987 flags |= MGMT_ADV_FLAG_TX_POWER;
5988
5989 return flags;
5990 }
5991
5992 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
5993 void *data, u16 data_len)
5994 {
5995 struct mgmt_rp_read_adv_features *rp;
5996 size_t rp_len;
5997 int err;
5998 struct adv_info *adv_instance;
5999 u32 supported_flags;
6000 u8 *instance;
6001
6002 BT_DBG("%s", hdev->name);
6003
6004 if (!lmp_le_capable(hdev))
6005 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6006 MGMT_STATUS_REJECTED);
6007
6008 hci_dev_lock(hdev);
6009
6010 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
6011 rp = kmalloc(rp_len, GFP_ATOMIC);
6012 if (!rp) {
6013 hci_dev_unlock(hdev);
6014 return -ENOMEM;
6015 }
6016
6017 supported_flags = get_supported_adv_flags(hdev);
6018
6019 rp->supported_flags = cpu_to_le32(supported_flags);
6020 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6021 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6022 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6023 rp->num_instances = hdev->adv_instance_cnt;
6024
6025 instance = rp->instance;
6026 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6027 *instance = adv_instance->instance;
6028 instance++;
6029 }
6030
6031 hci_dev_unlock(hdev);
6032
6033 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6034 MGMT_STATUS_SUCCESS, rp, rp_len);
6035
6036 kfree(rp);
6037
6038 return err;
6039 }
6040
6041 static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
6042 {
6043 u8 max_len = HCI_MAX_AD_LENGTH;
6044
6045 if (is_adv_data) {
6046 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6047 MGMT_ADV_FLAG_LIMITED_DISCOV |
6048 MGMT_ADV_FLAG_MANAGED_FLAGS))
6049 max_len -= 3;
6050
6051 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6052 max_len -= 3;
6053 } else {
6054 /* at least 1 byte of name should fit in */
6055 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6056 max_len -= 3;
6057
6058 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6059 max_len -= 4;
6060 }
6061
6062 return max_len;
6063 }
6064
6065 static bool flags_managed(u32 adv_flags)
6066 {
6067 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6068 MGMT_ADV_FLAG_LIMITED_DISCOV |
6069 MGMT_ADV_FLAG_MANAGED_FLAGS);
6070 }
6071
6072 static bool tx_power_managed(u32 adv_flags)
6073 {
6074 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6075 }
6076
6077 static bool name_managed(u32 adv_flags)
6078 {
6079 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6080 }
6081
6082 static bool appearance_managed(u32 adv_flags)
6083 {
6084 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6085 }
6086
6087 static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
6088 {
6089 int i, cur_len;
6090 u8 max_len;
6091
6092 max_len = tlv_data_max_len(adv_flags, is_adv_data);
6093
6094 if (len > max_len)
6095 return false;
6096
6097 /* Make sure that the data is correctly formatted. */
6098 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6099 cur_len = data[i];
6100
6101 if (data[i + 1] == EIR_FLAGS &&
6102 (!is_adv_data || flags_managed(adv_flags)))
6103 return false;
6104
6105 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6106 return false;
6107
6108 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6109 return false;
6110
6111 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6112 return false;
6113
6114 if (data[i + 1] == EIR_APPEARANCE &&
6115 appearance_managed(adv_flags))
6116 return false;
6117
6118 /* If the current field length would exceed the total data
6119 * length, then it's invalid.
6120 */
6121 if (i + cur_len >= len)
6122 return false;
6123 }
6124
6125 return true;
6126 }
6127
/* HCI request completion callback for MGMT_OP_ADD_ADVERTISING.
 *
 * On failure, rolls back every instance still marked as pending (they
 * were added optimistically by add_advertising()); on success, clears
 * the pending marker. Finally answers the originating mgmt command,
 * if one is still queued.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* _safe iteration: failed pending instances are removed from the
	 * list while walking it.
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer before removing the instance it
		 * is currently advertising.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6179
/* Command handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request, registers (or overwrites) the advertising
 * instance and, when the controller state allows, builds an HCI request
 * to start advertising it. The command completes either synchronously
 * (no HCI traffic needed) or from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based. */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Variable-length command: total size must match the two declared
	 * data lengths exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one advertising-state-changing operation at a time. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* adv data and scan rsp data are concatenated in cp->data. */
	if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6320
6321 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6322 u16 opcode)
6323 {
6324 struct mgmt_pending_cmd *cmd;
6325 struct mgmt_cp_remove_advertising *cp;
6326 struct mgmt_rp_remove_advertising rp;
6327
6328 BT_DBG("status %d", status);
6329
6330 hci_dev_lock(hdev);
6331
6332 /* A failure status here only means that we failed to disable
6333 * advertising. Otherwise, the advertising instance has been removed,
6334 * so report success.
6335 */
6336 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6337 if (!cmd)
6338 goto unlock;
6339
6340 cp = cmd->param;
6341 rp.instance = cp->instance;
6342
6343 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6344 &rp, sizeof(rp));
6345 mgmt_pending_remove(cmd);
6346
6347 unlock:
6348 hci_dev_unlock(hdev);
6349 }
6350
/* Command handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes one advertising instance (cp->instance != 0) or all of them
 * (cp->instance == 0). Completes synchronously when no HCI traffic is
 * needed, otherwise from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist; 0 means "all". */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Only one advertising-state-changing operation at a time. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Removes the instance(s) immediately and may queue HCI commands
	 * onto req for rescheduling the remaining instances.
	 */
	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6422
6423 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6424 void *data, u16 data_len)
6425 {
6426 struct mgmt_cp_get_adv_size_info *cp = data;
6427 struct mgmt_rp_get_adv_size_info rp;
6428 u32 flags, supported_flags;
6429 int err;
6430
6431 BT_DBG("%s", hdev->name);
6432
6433 if (!lmp_le_capable(hdev))
6434 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6435 MGMT_STATUS_REJECTED);
6436
6437 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6438 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6439 MGMT_STATUS_INVALID_PARAMS);
6440
6441 flags = __le32_to_cpu(cp->flags);
6442
6443 /* The current implementation only supports a subset of the specified
6444 * flags.
6445 */
6446 supported_flags = get_supported_adv_flags(hdev);
6447 if (flags & ~supported_flags)
6448 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6449 MGMT_STATUS_INVALID_PARAMS);
6450
6451 rp.instance = cp->instance;
6452 rp.flags = cp->flags;
6453 rp.max_adv_data_len = tlv_data_max_len(flags, true);
6454 rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
6455
6456 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6457 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6458
6459 return err;
6460 }
6461
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode; entry
 * order must therefore match the opcode numbering exactly. Each entry
 * gives the handler, the (minimum) expected parameter size, and flags
 * (variable length, usable without an hdev, allowed for untrusted
 * sockets, allowed on unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
};
6555
6556 void mgmt_index_added(struct hci_dev *hdev)
6557 {
6558 struct mgmt_ev_ext_index ev;
6559
6560 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6561 return;
6562
6563 switch (hdev->dev_type) {
6564 case HCI_PRIMARY:
6565 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6566 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6567 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6568 ev.type = 0x01;
6569 } else {
6570 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6571 HCI_MGMT_INDEX_EVENTS);
6572 ev.type = 0x00;
6573 }
6574 break;
6575 case HCI_AMP:
6576 ev.type = 0x02;
6577 break;
6578 default:
6579 return;
6580 }
6581
6582 ev.bus = hdev->bus;
6583
6584 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6585 HCI_MGMT_EXT_INDEX_EVENTS);
6586 }
6587
6588 void mgmt_index_removed(struct hci_dev *hdev)
6589 {
6590 struct mgmt_ev_ext_index ev;
6591 u8 status = MGMT_STATUS_INVALID_INDEX;
6592
6593 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6594 return;
6595
6596 switch (hdev->dev_type) {
6597 case HCI_PRIMARY:
6598 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6599
6600 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6601 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6602 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6603 ev.type = 0x01;
6604 } else {
6605 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6606 HCI_MGMT_INDEX_EVENTS);
6607 ev.type = 0x00;
6608 }
6609 break;
6610 case HCI_AMP:
6611 ev.type = 0x02;
6612 break;
6613 default:
6614 return;
6615 }
6616
6617 ev.bus = hdev->bus;
6618
6619 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6620 HCI_MGMT_EXT_INDEX_EVENTS);
6621 }
6622
6623 /* This function requires the caller holds hdev->lock */
6624 static void restart_le_actions(struct hci_dev *hdev)
6625 {
6626 struct hci_conn_params *p;
6627
6628 list_for_each_entry(p, &hdev->le_conn_params, list) {
6629 /* Needed for AUTO_OFF case where might not "really"
6630 * have been powered off.
6631 */
6632 list_del_init(&p->action);
6633
6634 switch (p->auto_connect) {
6635 case HCI_AUTO_CONN_DIRECT:
6636 case HCI_AUTO_CONN_ALWAYS:
6637 list_add(&p->action, &hdev->pend_le_conns);
6638 break;
6639 case HCI_AUTO_CONN_REPORT:
6640 list_add(&p->action, &hdev->pend_le_reports);
6641 break;
6642 default:
6643 break;
6644 }
6645 }
6646 }
6647
6648 void mgmt_power_on(struct hci_dev *hdev, int err)
6649 {
6650 struct cmd_lookup match = { NULL, hdev };
6651
6652 BT_DBG("err %d", err);
6653
6654 hci_dev_lock(hdev);
6655
6656 if (!err) {
6657 restart_le_actions(hdev);
6658 hci_update_background_scan(hdev);
6659 }
6660
6661 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6662
6663 new_settings(hdev, match.sk);
6664
6665 if (match.sk)
6666 sock_put(match.sk);
6667
6668 hci_dev_unlock(hdev);
6669 }
6670
/* Notify the management interface that the controller has powered off:
 * answer pending Set Powered commands, fail all other pending commands,
 * announce a zeroed class of device (if it was non-zero) and the new
 * settings. Locking is expected to be handled by the caller.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6704
6705 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6706 {
6707 struct mgmt_pending_cmd *cmd;
6708 u8 status;
6709
6710 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6711 if (!cmd)
6712 return;
6713
6714 if (err == -ERFKILL)
6715 status = MGMT_STATUS_RFKILLED;
6716 else
6717 status = MGMT_STATUS_FAILED;
6718
6719 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6720
6721 mgmt_pending_remove(cmd);
6722 }
6723
6724 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6725 bool persistent)
6726 {
6727 struct mgmt_ev_new_link_key ev;
6728
6729 memset(&ev, 0, sizeof(ev));
6730
6731 ev.store_hint = persistent;
6732 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6733 ev.key.addr.type = BDADDR_BREDR;
6734 ev.key.type = key->type;
6735 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6736 ev.key.pin_len = key->pin_len;
6737
6738 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6739 }
6740
6741 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6742 {
6743 switch (ltk->type) {
6744 case SMP_LTK:
6745 case SMP_LTK_SLAVE:
6746 if (ltk->authenticated)
6747 return MGMT_LTK_AUTHENTICATED;
6748 return MGMT_LTK_UNAUTHENTICATED;
6749 case SMP_LTK_P256:
6750 if (ltk->authenticated)
6751 return MGMT_LTK_P256_AUTH;
6752 return MGMT_LTK_P256_UNAUTH;
6753 case SMP_LTK_P256_DEBUG:
6754 return MGMT_LTK_P256_DEBUG;
6755 }
6756
6757 return MGMT_LTK_UNAUTHENTICATED;
6758 }
6759
6760 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6761 {
6762 struct mgmt_ev_new_long_term_key ev;
6763
6764 memset(&ev, 0, sizeof(ev));
6765
6766 /* Devices using resolvable or non-resolvable random addresses
6767 * without providing an identity resolving key don't require
6768 * to store long term keys. Their addresses will change the
6769 * next time around.
6770 *
6771 * Only when a remote device provides an identity address
6772 * make sure the long term key is stored. If the remote
6773 * identity is known, the long term keys are internally
6774 * mapped to the identity address. So allow static random
6775 * and public addresses here.
6776 */
6777 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6778 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6779 ev.store_hint = 0x00;
6780 else
6781 ev.store_hint = persistent;
6782
6783 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6784 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6785 ev.key.type = mgmt_ltk_type(key);
6786 ev.key.enc_size = key->enc_size;
6787 ev.key.ediv = key->ediv;
6788 ev.key.rand = key->rand;
6789
6790 if (key->type == SMP_LTK)
6791 ev.key.master = 1;
6792
6793 /* Make sure we copy only the significant bytes based on the
6794 * encryption key size, and set the rest of the value to zeroes.
6795 */
6796 memcpy(ev.key.val, key->val, key->enc_size);
6797 memset(ev.key.val + key->enc_size, 0,
6798 sizeof(ev.key.val) - key->enc_size);
6799
6800 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6801 }
6802
6803 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6804 {
6805 struct mgmt_ev_new_irk ev;
6806
6807 memset(&ev, 0, sizeof(ev));
6808
6809 ev.store_hint = persistent;
6810
6811 bacpy(&ev.rpa, &irk->rpa);
6812 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6813 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6814 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6815
6816 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6817 }
6818
6819 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6820 bool persistent)
6821 {
6822 struct mgmt_ev_new_csrk ev;
6823
6824 memset(&ev, 0, sizeof(ev));
6825
6826 /* Devices using resolvable or non-resolvable random addresses
6827 * without providing an identity resolving key don't require
6828 * to store signature resolving keys. Their addresses will change
6829 * the next time around.
6830 *
6831 * Only when a remote device provides an identity address
6832 * make sure the signature resolving key is stored. So allow
6833 * static random and public addresses here.
6834 */
6835 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6836 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6837 ev.store_hint = 0x00;
6838 else
6839 ev.store_hint = persistent;
6840
6841 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6842 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6843 ev.key.type = csrk->type;
6844 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6845
6846 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6847 }
6848
6849 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6850 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6851 u16 max_interval, u16 latency, u16 timeout)
6852 {
6853 struct mgmt_ev_new_conn_param ev;
6854
6855 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6856 return;
6857
6858 memset(&ev, 0, sizeof(ev));
6859 bacpy(&ev.addr.bdaddr, bdaddr);
6860 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6861 ev.store_hint = store_hint;
6862 ev.min_interval = cpu_to_le16(min_interval);
6863 ev.max_interval = cpu_to_le16(max_interval);
6864 ev.latency = cpu_to_le16(latency);
6865 ev.timeout = cpu_to_le16(timeout);
6866
6867 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6868 }
6869
/* Emit a Device Connected event for a new BR/EDR or LE connection.
 *
 * @conn:     the established connection (dst address/type are read)
 * @flags:    mgmt connection flags forwarded verbatim to userspace
 * @name:     optional remote name (BR/EDR), appended as EIR data
 * @name_len: length of @name; 0 means no name available
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event plus variable-length EIR payload is assembled on the
	 * stack; 512 bytes covers header + name + class of device.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* LE: forward the advertising data as-is */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: build EIR from remote name and class of device */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6906
6907 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6908 {
6909 struct sock **sk = data;
6910
6911 cmd->cmd_complete(cmd, 0);
6912
6913 *sk = cmd->sk;
6914 sock_hold(*sk);
6915
6916 mgmt_pending_remove(cmd);
6917 }
6918
6919 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6920 {
6921 struct hci_dev *hdev = data;
6922 struct mgmt_cp_unpair_device *cp = cmd->param;
6923
6924 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6925
6926 cmd->cmd_complete(cmd, 0);
6927 mgmt_pending_remove(cmd);
6928 }
6929
6930 bool mgmt_powering_down(struct hci_dev *hdev)
6931 {
6932 struct mgmt_pending_cmd *cmd;
6933 struct mgmt_mode *cp;
6934
6935 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6936 if (!cmd)
6937 return false;
6938
6939 cp = cmd->param;
6940 if (!cp->val)
6941 return true;
6942
6943 return false;
6944 }
6945
/* Handle a completed disconnection: possibly expedite a pending
 * power-off, respond to pending Disconnect/Unpair Device commands and
 * emit a Device Disconnected event.
 *
 * @reason:         mgmt disconnect reason forwarded to userspace
 * @mgmt_connected: false if mgmt never announced this connection, in
 *                  which case no event is sent
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Last connection gone: run the queued power-off now */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are represented over the mgmt interface */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command; its socket (if any)
	 * is returned in sk so the event below is not echoed to it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp() */
	if (sk)
		sock_put(sk);

	/* Unpair Device waits for the disconnect; complete it now */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6981
/* Handle a failed disconnect attempt: complete pending Unpair Device
 * commands and, if a Disconnect command for this exact address is
 * pending, complete it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	/* Unpair Device must not stay pending once the disconnect it
	 * was waiting for has failed.
	 */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this address/type */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7007
7008 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7009 u8 addr_type, u8 status)
7010 {
7011 struct mgmt_ev_connect_failed ev;
7012
7013 /* The connection is still in hci_conn_hash so test for 1
7014 * instead of 0 to know if this is the last one.
7015 */
7016 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7017 cancel_delayed_work(&hdev->power_off);
7018 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7019 }
7020
7021 bacpy(&ev.addr.bdaddr, bdaddr);
7022 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7023 ev.status = mgmt_status(status);
7024
7025 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7026 }
7027
7028 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7029 {
7030 struct mgmt_ev_pin_code_request ev;
7031
7032 bacpy(&ev.addr.bdaddr, bdaddr);
7033 ev.addr.type = BDADDR_BREDR;
7034 ev.secure = secure;
7035
7036 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7037 }
7038
7039 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7040 u8 status)
7041 {
7042 struct mgmt_pending_cmd *cmd;
7043
7044 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7045 if (!cmd)
7046 return;
7047
7048 cmd->cmd_complete(cmd, mgmt_status(status));
7049 mgmt_pending_remove(cmd);
7050 }
7051
7052 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7053 u8 status)
7054 {
7055 struct mgmt_pending_cmd *cmd;
7056
7057 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7058 if (!cmd)
7059 return;
7060
7061 cmd->cmd_complete(cmd, mgmt_status(status));
7062 mgmt_pending_remove(cmd);
7063 }
7064
7065 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7066 u8 link_type, u8 addr_type, u32 value,
7067 u8 confirm_hint)
7068 {
7069 struct mgmt_ev_user_confirm_request ev;
7070
7071 BT_DBG("%s", hdev->name);
7072
7073 bacpy(&ev.addr.bdaddr, bdaddr);
7074 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7075 ev.confirm_hint = confirm_hint;
7076 ev.value = cpu_to_le32(value);
7077
7078 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7079 NULL);
7080 }
7081
7082 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7083 u8 link_type, u8 addr_type)
7084 {
7085 struct mgmt_ev_user_passkey_request ev;
7086
7087 BT_DBG("%s", hdev->name);
7088
7089 bacpy(&ev.addr.bdaddr, bdaddr);
7090 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7091
7092 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7093 NULL);
7094 }
7095
7096 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7097 u8 link_type, u8 addr_type, u8 status,
7098 u8 opcode)
7099 {
7100 struct mgmt_pending_cmd *cmd;
7101
7102 cmd = pending_find(opcode, hdev);
7103 if (!cmd)
7104 return -ENOENT;
7105
7106 cmd->cmd_complete(cmd, mgmt_status(status));
7107 mgmt_pending_remove(cmd);
7108
7109 return 0;
7110 }
7111
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7118
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7126
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7133
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7141
7142 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7143 u8 link_type, u8 addr_type, u32 passkey,
7144 u8 entered)
7145 {
7146 struct mgmt_ev_passkey_notify ev;
7147
7148 BT_DBG("%s", hdev->name);
7149
7150 bacpy(&ev.addr.bdaddr, bdaddr);
7151 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7152 ev.passkey = __cpu_to_le32(passkey);
7153 ev.entered = entered;
7154
7155 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7156 }
7157
/* Handle an authentication failure on @conn: emit an Auth Failed
 * event and complete any pending pairing command for the connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command; it receives
	 * the command status below instead of the event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7178
/* Handle completion of an HCI authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller state, complete pending
 * Set Link Security commands, and emit New Settings if anything
 * changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag;
	 * "changed" is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp held a reference via the lookup */
	if (match.sk)
		sock_put(match.sk);
}
7205
7206 static void clear_eir(struct hci_request *req)
7207 {
7208 struct hci_dev *hdev = req->hdev;
7209 struct hci_cp_write_eir cp;
7210
7211 if (!lmp_ext_inq_capable(hdev))
7212 return;
7213
7214 memset(hdev->eir, 0, sizeof(hdev->eir));
7215
7216 memset(&cp, 0, sizeof(cp));
7217
7218 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7219 }
7220
/* Handle completion of an HCI SSP mode change: sync HCI_SSP_ENABLED
 * (and the dependent HCI_HS_ENABLED) flags, complete pending Set SSP
 * commands, emit New Settings when the state changed, and update or
 * clear the EIR to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* An enable attempt failed: roll back the flag if it
		 * was set optimistically, dropping High Speed with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* High Speed requires SSP, so disabling SSP also clears
		 * HS. "changed" reflects whether either flag flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Refresh the EIR: with SSP on it may carry new data (and the
	 * debug-key mode must be re-applied); with SSP off it is
	 * cleared entirely.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7273
7274 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7275 {
7276 struct cmd_lookup *match = data;
7277
7278 if (match->sk == NULL) {
7279 match->sk = cmd->sk;
7280 sock_hold(match->sk);
7281 }
7282 }
7283
/* Handle completion of a class-of-device update triggered by Set Dev
 * Class, Add UUID or Remove UUID; on success broadcast the new class
 * to sockets subscribed to dev-class events.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Grab the socket of whichever command triggered the change */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken by sk_lookup() */
	if (match.sk)
		sock_put(match.sk);
}
7302
/* Handle completion of a local-name change: update the cached name
 * when the change did not originate from a mgmt command, and emit a
 * Local Name Changed event (suppressed during power-on).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Change came from outside mgmt; sync the cached name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the issuing socket (if any); it gets the command reply */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7330
7331 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7332 {
7333 int i;
7334
7335 for (i = 0; i < uuid_count; i++) {
7336 if (!memcmp(uuid, uuids[i], 16))
7337 return true;
7338 }
7339
7340 return false;
7341 }
7342
/* Scan EIR/advertising data for any service UUID contained in the
 * @uuids filter list. 16- and 32-bit UUIDs found in the data are
 * expanded to 128 bits over the Bluetooth base UUID before comparing.
 *
 * EIR format per field: [len][type][data...], where len counts the
 * type byte plus the data. UUID bytes are little-endian in the air.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Zero length terminates the significant part */
		if (field_len == 0)
			break;

		/* Field would run past the end of the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data starts at eir[2]; 2 bytes per UUID */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Data starts at eir[2]; 4 bytes per UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs; compare directly */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (len byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7397
/* Schedule an LE scan restart (after DISCOV_LE_RESTART_DELAY) so that
 * strict duplicate filtering reports fresh RSSI values. Skipped when
 * not scanning or when the restart would land past the end of the
 * current scan window anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Restart time would fall after the scan window ends */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7412
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true if the result should be
 * reported to userspace. May schedule an LE scan restart as a side
 * effect when the strict-duplicate-filter quirk is set.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		/* A match in either the EIR/adv data or the scan
		 * response is sufficient.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7457
/* Report a discovered device to userspace as a Device Found event,
 * after applying the discovery filters (service filter, limited
 * discoverable check). EIR/adv data, an optional class of device and
 * the scan response are concatenated into the event payload.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: limited bit lives in the CoD minor field */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD element instead */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already has one */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7539
/* Report a resolved remote name as a Device Found event whose EIR
 * payload carries only the complete-name element.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Event header plus one EIR field: len + type + name bytes */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
7562
7563 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7564 {
7565 struct mgmt_ev_discovering ev;
7566
7567 BT_DBG("%s discovering %u", hdev->name, discovering);
7568
7569 memset(&ev, 0, sizeof(ev));
7570 ev.type = hdev->discovery.type;
7571 ev.discovering = discovering;
7572
7573 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7574 }
7575
7576 static struct hci_mgmt_chan chan = {
7577 .channel = HCI_CHANNEL_CONTROL,
7578 .handler_count = ARRAY_SIZE(mgmt_handlers),
7579 .handlers = mgmt_handlers,
7580 .hdev_init = mgmt_init_hdev,
7581 };
7582
/* Register the management channel; called during subsystem init.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7587
/* Unregister the management channel; called on subsystem teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}