]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Connection parameters and resolvable address
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Opcodes this management interface implements; reported to user space
 * in the Read Management Supported Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Events this management interface may emit; reported to user space in
 * the Read Management Supported Commands reply alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
112
/* Two seconds, expressed in jiffies (used as a cache expiry period;
 * NOTE(review): confirm exact use at the call sites, not visible here).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered from the mgmt point of view: the controller is actually up
 * and not merely auto-powered during setup (HCI_AUTO_OFF still set).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* A mgmt command that is still awaiting completion */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode that created this entry */
	int index;		/* controller id the command targets */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* issuing socket (reference held) */
	void *user_data;	/* command-specific context */
};
126
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status code (see mgmt_status() below for the bounds-checked lookup).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
191
192 static u8 mgmt_status(u8 hci_status)
193 {
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
196
197 return MGMT_STATUS_FAILED;
198 }
199
200 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
201 {
202 struct sk_buff *skb;
203 struct mgmt_hdr *hdr;
204 struct mgmt_ev_cmd_status *ev;
205 int err;
206
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
208
209 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 if (!skb)
211 return -ENOMEM;
212
213 hdr = (void *) skb_put(skb, sizeof(*hdr));
214
215 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
216 hdr->index = cpu_to_le16(index);
217 hdr->len = cpu_to_le16(sizeof(*ev));
218
219 ev = (void *) skb_put(skb, sizeof(*ev));
220 ev->status = status;
221 ev->opcode = cpu_to_le16(cmd);
222
223 err = sock_queue_rcv_skb(sk, skb);
224 if (err < 0)
225 kfree_skb(skb);
226
227 return err;
228 }
229
230 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
231 void *rp, size_t rp_len)
232 {
233 struct sk_buff *skb;
234 struct mgmt_hdr *hdr;
235 struct mgmt_ev_cmd_complete *ev;
236 int err;
237
238 BT_DBG("sock %p", sk);
239
240 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 if (!skb)
242 return -ENOMEM;
243
244 hdr = (void *) skb_put(skb, sizeof(*hdr));
245
246 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
247 hdr->index = cpu_to_le16(index);
248 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
249
250 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
251 ev->opcode = cpu_to_le16(cmd);
252 ev->status = status;
253
254 if (rp)
255 memcpy(ev->data, rp, rp_len);
256
257 err = sock_queue_rcv_skb(sk, skb);
258 if (err < 0)
259 kfree_skb(skb);
260
261 return err;
262 }
263
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 u16 data_len)
266 {
267 struct mgmt_rp_read_version rp;
268
269 BT_DBG("sock %p", sk);
270
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
273
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 sizeof(rp));
276 }
277
278 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
279 u16 data_len)
280 {
281 struct mgmt_rp_read_commands *rp;
282 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
283 const u16 num_events = ARRAY_SIZE(mgmt_events);
284 __le16 *opcode;
285 size_t rp_size;
286 int i, err;
287
288 BT_DBG("sock %p", sk);
289
290 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
291
292 rp = kmalloc(rp_size, GFP_KERNEL);
293 if (!rp)
294 return -ENOMEM;
295
296 rp->num_commands = __constant_cpu_to_le16(num_commands);
297 rp->num_events = __constant_cpu_to_le16(num_events);
298
299 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
300 put_unaligned_le16(mgmt_commands[i], opcode);
301
302 for (i = 0; i < num_events; i++, opcode++)
303 put_unaligned_le16(mgmt_events[i], opcode);
304
305 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
306 rp_size);
307 kfree(rp);
308
309 return err;
310 }
311
/* Handler for the Read Controller Index List command: reply with the
 * ids of all BR/EDR controllers that are neither still in setup nor
 * claimed by a user channel.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Count every BR/EDR controller WITHOUT the flag based filters
	 * used in the fill loop below. dev_flags bits are not protected
	 * by hci_dev_list_lock, so this unfiltered count is a safe upper
	 * bound: the filtered fill can never exceed the allocation even
	 * if flags change between the two loops.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Fill in the indexes, skipping controllers that are still being
	 * configured or are exclusively bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the number of indexes that
	 * actually made it into the reply.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
364
365 static u32 get_supported_settings(struct hci_dev *hdev)
366 {
367 u32 settings = 0;
368
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
371 settings |= MGMT_SETTING_DEBUG_KEYS;
372
373 if (lmp_bredr_capable(hdev)) {
374 settings |= MGMT_SETTING_CONNECTABLE;
375 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
376 settings |= MGMT_SETTING_FAST_CONNECTABLE;
377 settings |= MGMT_SETTING_DISCOVERABLE;
378 settings |= MGMT_SETTING_BREDR;
379 settings |= MGMT_SETTING_LINK_SECURITY;
380
381 if (lmp_ssp_capable(hdev)) {
382 settings |= MGMT_SETTING_SSP;
383 settings |= MGMT_SETTING_HS;
384 }
385
386 if (lmp_sc_capable(hdev) ||
387 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
388 settings |= MGMT_SETTING_SECURE_CONN;
389 }
390
391 if (lmp_le_capable(hdev)) {
392 settings |= MGMT_SETTING_LE;
393 settings |= MGMT_SETTING_ADVERTISING;
394 settings |= MGMT_SETTING_PRIVACY;
395 }
396
397 return settings;
398 }
399
400 static u32 get_current_settings(struct hci_dev *hdev)
401 {
402 u32 settings = 0;
403
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
406
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
409
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
412
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
415
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
418
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
421
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
424
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
427
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
430
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
433
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
436
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
439
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
442
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
445
446 return settings;
447 }
448
449 #define PNP_INFO_SVCLASS_ID 0x1200
450
/* Append an EIR block listing the registered 16-bit service UUIDs to
 * data (len bytes available). Returns the new write position; if not
 * all UUIDs fit, the block type is downgraded to "incomplete list".
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2 byte EIR header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives in bytes 12-13 of the stored
		 * 128-bit form.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Emit the EIR header lazily, on the first UUID kept */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		/* The length byte counts the type byte plus the UUIDs */
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
492
/* Append an EIR block listing the registered 32-bit service UUIDs to
 * data (len bytes available). Same structure as create_uuid16_list().
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2 byte EIR header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Emit the EIR header lazily, on the first UUID kept */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value occupies bytes 12-15 of the stored
		 * 128-bit form.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
525
/* Append an EIR block listing the registered 128-bit service UUIDs to
 * data (len bytes available). Same structure as create_uuid16_list().
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2 byte EIR header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Emit the EIR header lazily, on the first UUID kept */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
558
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
560 {
561 struct pending_cmd *cmd;
562
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
565 return cmd;
566 }
567
568 return NULL;
569 }
570
571 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
572 {
573 u8 ad_len = 0;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577 if (name_len > 0) {
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
579
580 if (name_len > max_len) {
581 name_len = max_len;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 ptr[0] = name_len + 1;
587
588 memcpy(ptr + 2, hdev->dev_name, name_len);
589
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
592 }
593
594 return ad_len;
595 }
596
/* Queue an HCI command updating the controller's scan response data,
 * unless it matches what was last programmed.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do when the data matches the cached copy */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer, not just len bytes, so the
	 * comparison above stays valid for future, shorter data.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
621
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
623 {
624 struct pending_cmd *cmd;
625
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
628 */
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
630 if (cmd) {
631 struct mgmt_mode *cp = cmd->param;
632 if (cp->val == 0x01)
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
636 } else {
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
641 }
642
643 return 0;
644 }
645
646 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
647 {
648 u8 ad_len = 0, flags = 0;
649
650 flags |= get_adv_discov_flags(hdev);
651
652 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
653 flags |= LE_AD_NO_BREDR;
654
655 if (flags) {
656 BT_DBG("adv flags 0x%02x", flags);
657
658 ptr[0] = 2;
659 ptr[1] = EIR_FLAGS;
660 ptr[2] = flags;
661
662 ad_len += 3;
663 ptr += 3;
664 }
665
666 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
667 ptr[0] = 2;
668 ptr[1] = EIR_TX_POWER;
669 ptr[2] = (u8) hdev->adv_tx_power;
670
671 ad_len += 3;
672 ptr += 3;
673 }
674
675 return ad_len;
676 }
677
/* Queue an HCI command updating the controller's advertising data,
 * unless it matches what was last programmed.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do when the data matches the cached copy */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer, not just len bytes, so the
	 * comparison above stays valid for future, shorter data.
	 */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
702
/* Build the extended inquiry response payload into data: local name,
 * TX power, device ID and the three service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID block: source, vendor, product and version,
		 * each as a little-endian 16-bit value.
		 */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper appends into whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
750
751 static void update_eir(struct hci_request *req)
752 {
753 struct hci_dev *hdev = req->hdev;
754 struct hci_cp_write_eir cp;
755
756 if (!hdev_is_powered(hdev))
757 return;
758
759 if (!lmp_ext_inq_capable(hdev))
760 return;
761
762 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
763 return;
764
765 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
766 return;
767
768 memset(&cp, 0, sizeof(cp));
769
770 create_eir(hdev, cp.data);
771
772 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
773 return;
774
775 memcpy(hdev->eir, cp.data, sizeof(cp.data));
776
777 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
778 }
779
780 static u8 get_service_classes(struct hci_dev *hdev)
781 {
782 struct bt_uuid *uuid;
783 u8 val = 0;
784
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
787
788 return val;
789 }
790
791 static void update_class(struct hci_request *req)
792 {
793 struct hci_dev *hdev = req->hdev;
794 u8 cod[3];
795
796 BT_DBG("%s", hdev->name);
797
798 if (!hdev_is_powered(hdev))
799 return;
800
801 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
802 return;
803
804 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
805 return;
806
807 cod[0] = hdev->minor_class;
808 cod[1] = hdev->major_class;
809 cod[2] = get_service_classes(hdev);
810
811 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
812 cod[1] |= 0x20;
813
814 if (memcmp(cod, hdev->dev_class, 3) == 0)
815 return;
816
817 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
818 }
819
820 static bool get_connectable(struct hci_dev *hdev)
821 {
822 struct pending_cmd *cmd;
823
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
826 */
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
828 if (cmd) {
829 struct mgmt_mode *cp = cmd->param;
830 return cp->val;
831 }
832
833 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
834 }
835
/* Queue the HCI commands that configure and switch on LE advertising.
 * The own address must be settled before the parameters are queued,
 * hence the hci_update_random_address() call first.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
863
864 static void disable_advertising(struct hci_request *req)
865 {
866 u8 enable = 0x00;
867
868 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
869 }
870
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Someone else may already have cleared the cache flag */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
891
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising with no LE connections, cycle advertising so a fresh RPA
 * gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Only refresh when advertising and no LE links exist; otherwise
	 * just the expired bit is recorded for later.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
917
/* One-time mgmt initialization for a controller, triggered the first
 * time a mgmt command references it (guarded by the HCI_MGMT bit).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
933
934 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
935 void *data, u16 data_len)
936 {
937 struct mgmt_rp_read_info rp;
938
939 BT_DBG("sock %p %s", sk, hdev->name);
940
941 hci_dev_lock(hdev);
942
943 memset(&rp, 0, sizeof(rp));
944
945 bacpy(&rp.bdaddr, &hdev->bdaddr);
946
947 rp.version = hdev->hci_ver;
948 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
949
950 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
951 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
952
953 memcpy(rp.dev_class, hdev->dev_class, 3);
954
955 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
956 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
957
958 hci_dev_unlock(hdev);
959
960 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
961 sizeof(rp));
962 }
963
/* Drop the socket reference and memory held by a pending command */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
970
971 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
972 struct hci_dev *hdev, void *data,
973 u16 len)
974 {
975 struct pending_cmd *cmd;
976
977 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
978 if (!cmd)
979 return NULL;
980
981 cmd->opcode = opcode;
982 cmd->index = hdev->id;
983
984 cmd->param = kmalloc(len, GFP_KERNEL);
985 if (!cmd->param) {
986 kfree(cmd);
987 return NULL;
988 }
989
990 if (data)
991 memcpy(cmd->param, data, len);
992
993 cmd->sk = sk;
994 sock_hold(sk);
995
996 list_add(&cmd->list, &hdev->mgmt_pending);
997
998 return cmd;
999 }
1000
1001 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1002 void (*cb)(struct pending_cmd *cmd,
1003 void *data),
1004 void *data)
1005 {
1006 struct pending_cmd *cmd, *tmp;
1007
1008 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1009 if (opcode > 0 && cmd->opcode != opcode)
1010 continue;
1011
1012 cb(cmd, data);
1013 }
1014 }
1015
/* Unlink a pending command from its controller's list and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1021
1022 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1023 {
1024 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1025
1026 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1027 sizeof(settings));
1028 }
1029
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, the actual power off work can be scheduled.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0)
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1037
/* Queue the HCI commands that quiesce the controller before power off:
 * disable page/inquiry scan, stop advertising and LE scanning, and
 * disconnect every remaining link. clean_up_hci_complete() runs when
 * the request finishes.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	/* Queue a disconnect for every remaining connection */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;

		dc.handle = cpu_to_le16(conn->handle);
		dc.reason = 0x15; /* Terminated due to Power Off */
		hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1068
/* Handler for the Set Powered command */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was only auto-powered for setup, cancel the
	 * scheduled auto power off; powering "on" then just requires the
	 * mgmt powered notification rather than a real power cycle.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply right away */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1130
/* Broadcast a mgmt event (with optional payload) to all control
 * sockets, except skip_sk if given.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	/* Events without a controller use the "none" index */
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1160
1161 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1162 {
1163 __le32 ev;
1164
1165 ev = cpu_to_le32(get_current_settings(hdev));
1166
1167 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1168 }
1169
/* Context passed through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket (ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1175
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, remember the first socket seen (with a reference)
 * and release the entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink manually; the socket is still needed below */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1191
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by data and remove the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1199
1200 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1201 {
1202 if (!lmp_bredr_capable(hdev))
1203 return MGMT_STATUS_NOT_SUPPORTED;
1204 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1205 return MGMT_STATUS_REJECTED;
1206 else
1207 return MGMT_STATUS_SUCCESS;
1208 }
1209
1210 static u8 mgmt_le_support(struct hci_dev *hdev)
1211 {
1212 if (!lmp_le_capable(hdev))
1213 return MGMT_STATUS_NOT_SUPPORTED;
1214 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1215 return MGMT_STATUS_REJECTED;
1216 else
1217 return MGMT_STATUS_SUCCESS;
1218 }
1219
/* HCI request completion handler for Set Discoverable.  Commits or
 * rolls back the discoverable flags based on the HCI result, answers
 * the pending mgmt command, broadcasts New Settings on change, and
 * refreshes the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the flag that set_discoverable() set optimistically */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1276
/* Set Discoverable command handler.  Validates the requested mode
 * (0x00 off, 0x01 general, 0x02 limited) and timeout combination,
 * updates the stored flags directly when no HCI traffic is required
 * (powered off, or only the timeout changes), and otherwise queues the
 * IAC / scan-enable / advertising-data HCI commands, finishing in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00; /* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33; /* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33; /* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1441
/* Queue HCI commands to switch page scan parameters between the fast
 * connectable profile (interlaced scan, 160 ms interval) and the
 * default one (standard scan, 1.28 s interval).  Commands are only
 * added when the stored parameters actually differ, so calling this
 * redundantly is cheap.  No-op on non-BR/EDR or pre-1.2 controllers.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require Bluetooth 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1476
/* HCI request completion handler for Set Connectable.  Commits the
 * connectable flag according to the HCI result, answers the pending
 * mgmt command and broadcasts New Settings when the flag changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1514
1515 static int set_connectable_update_settings(struct hci_dev *hdev,
1516 struct sock *sk, u8 val)
1517 {
1518 bool changed = false;
1519 int err;
1520
1521 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1522 changed = true;
1523
1524 if (val) {
1525 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1526 } else {
1527 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1528 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1529 }
1530
1531 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1532 if (err < 0)
1533 return err;
1534
1535 if (changed)
1536 return new_settings(hdev, sk);
1537
1538 return 0;
1539 }
1540
/* Set Connectable command handler.  When powered off only the stored
 * settings are updated; otherwise the required scan-enable and/or
 * advertising HCI commands are queued and the operation finishes in
 * set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Turning off page scan ends discoverable too,
			 * so stop any pending discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Re-arm advertising (disable then enable) while no LE links exist */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing was queued, update settings directly */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1635
1636 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1637 u16 len)
1638 {
1639 struct mgmt_mode *cp = data;
1640 bool changed;
1641 int err;
1642
1643 BT_DBG("request for %s", hdev->name);
1644
1645 if (cp->val != 0x00 && cp->val != 0x01)
1646 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1647 MGMT_STATUS_INVALID_PARAMS);
1648
1649 hci_dev_lock(hdev);
1650
1651 if (cp->val)
1652 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1653 else
1654 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1655
1656 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1657 if (err < 0)
1658 goto unlock;
1659
1660 if (changed)
1661 err = new_settings(hdev, sk);
1662
1663 unlock:
1664 hci_dev_unlock(hdev);
1665 return err;
1666 }
1667
/* Set Link Security command handler.  Maps the mgmt setting onto the
 * HCI authentication-enable flag: stored-flag-only update when powered
 * off, otherwise a Write Auth Enable HCI command whose completion
 * answers the pending command elsewhere.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: reply directly */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1737
/* Set Secure Simple Pairing command handler.  Stored-flag-only update
 * when powered off (disabling SSP also clears High Speed); otherwise a
 * Write SSP Mode HCI command is sent and the pending command is
 * answered on its completion.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* Disabling SSP forces HS off too; report "changed"
			 * if either flag actually flipped.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply without HCI traffic */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1815
/* Set High Speed command handler.  HS is a host-side flag with no HCI
 * command of its own; it requires SSP to be enabled, and disabling HS
 * on a powered adapter is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1866
/* HCI request completion handler for Set LE.  Answers all pending
 * Set LE commands (with the error status on failure), broadcasts New
 * Settings, and refreshes advertising/scan-response data when LE ended
 * up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1904
/* Set Low Energy command handler.  Rejected on LE-only controllers
 * (they cannot toggle LE off).  Stored-flag-only update when powered
 * off or when the host-LE state already matches; otherwise queues a
 * Write LE Host Supported command (plus advertising disable when
 * turning LE off), finishing in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also takes down advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1993
1994 /* This is a helper function to test for pending mgmt commands that can
1995 * cause CoD or EIR HCI commands. We can only allow one such pending
1996 * mgmt command at a time since otherwise we cannot easily track what
1997 * the current values are, will be, and based on that calculate if a new
1998 * HCI command needs to be sent and if yes with what value.
1999 */
2000 static bool pending_eir_or_class(struct hci_dev *hdev)
2001 {
2002 struct pending_cmd *cmd;
2003
2004 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2005 switch (cmd->opcode) {
2006 case MGMT_OP_ADD_UUID:
2007 case MGMT_OP_REMOVE_UUID:
2008 case MGMT_OP_SET_DEV_CLASS:
2009 case MGMT_OP_SET_POWERED:
2010 return true;
2011 }
2012 }
2013
2014 return false;
2015 }
2016
/* Bluetooth Base UUID in little-endian byte order; 16/32-bit UUIDs
 * share these last 12 bytes (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2021
2022 static u8 get_uuid_size(const u8 *uuid)
2023 {
2024 u32 val;
2025
2026 if (memcmp(uuid, bluetooth_base_uuid, 12))
2027 return 128;
2028
2029 val = get_unaligned_le32(&uuid[12]);
2030 if (val > 0xffff)
2031 return 32;
2032
2033 return 16;
2034 }
2035
/* Shared completion helper for UUID/class commands: answer the pending
 * command identified by @mgmt_op with the (possibly updated) class of
 * device and remove it from the pending list.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2054
/* HCI request completion handler for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2061
/* Add UUID command handler.  Appends the UUID to the stored list and
 * queues class-of-device and EIR updates.  If no HCI commands were
 * needed (-ENODATA) the command completes immediately; otherwise a
 * pending entry is added and answered in add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed; complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2119
2120 static bool enable_service_cache(struct hci_dev *hdev)
2121 {
2122 if (!hdev_is_powered(hdev))
2123 return false;
2124
2125 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2126 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2127 CACHE_TIMEOUT);
2128 return true;
2129 }
2130
2131 return false;
2132 }
2133
/* HCI request completion handler for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2140
/* Remove UUID command handler.  An all-zero UUID clears the entire
 * list; otherwise every matching entry is removed.  Class-of-device
 * and EIR updates are queued, completing in remove_uuid_complete()
 * (or immediately if no HCI commands were needed).
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer got armed, the update is
		 * deferred; complete the command right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2218
/* HCI request completion handler for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2225
/* Set Device Class command handler.  Validates the major/minor values,
 * stores them, and (when powered) flushes any pending service cache
 * before queuing class/EIR updates, finishing in set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while synchronously cancelling the
		 * service cache work to avoid deadlocking against it.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2296
2297 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2298 u16 len)
2299 {
2300 struct mgmt_cp_load_link_keys *cp = data;
2301 u16 key_count, expected_len;
2302 bool changed;
2303 int i;
2304
2305 BT_DBG("request for %s", hdev->name);
2306
2307 if (!lmp_bredr_capable(hdev))
2308 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2309 MGMT_STATUS_NOT_SUPPORTED);
2310
2311 key_count = __le16_to_cpu(cp->key_count);
2312
2313 expected_len = sizeof(*cp) + key_count *
2314 sizeof(struct mgmt_link_key_info);
2315 if (expected_len != len) {
2316 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2317 len, expected_len);
2318 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2319 MGMT_STATUS_INVALID_PARAMS);
2320 }
2321
2322 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2323 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2324 MGMT_STATUS_INVALID_PARAMS);
2325
2326 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2327 key_count);
2328
2329 for (i = 0; i < key_count; i++) {
2330 struct mgmt_link_key_info *key = &cp->keys[i];
2331
2332 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2333 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2334 MGMT_STATUS_INVALID_PARAMS);
2335 }
2336
2337 hci_dev_lock(hdev);
2338
2339 hci_link_keys_clear(hdev);
2340
2341 if (cp->debug_keys)
2342 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2343 else
2344 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2345
2346 if (changed)
2347 new_settings(hdev, NULL);
2348
2349 for (i = 0; i < key_count; i++) {
2350 struct mgmt_link_key_info *key = &cp->keys[i];
2351
2352 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2353 key->type, key->pin_len);
2354 }
2355
2356 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2357
2358 hci_dev_unlock(hdev);
2359
2360 return 0;
2361 }
2362
/* Emit a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (the socket that requested the unpairing
 * already receives a command response instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2374
/* Unpair Device command handler.  Removes the stored link key (BR/EDR)
 * or IRK/LTK and connection parameters (LE) for the given address and,
 * when requested and a connection exists, queues an HCI Disconnect
 * whose completion answers the pending command elsewhere.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* Only the LTK lookup decides whether the device counted
		 * as paired; IRK/params removal is best-effort.
		 */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down: complete now and notify others */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2465
/* Disconnect command handler: tear down the ACL or LE link to the
 * given address.  The command reply is deferred until the HCI
 * Disconnect command completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no established link yet */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2530
2531 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2532 {
2533 switch (link_type) {
2534 case LE_LINK:
2535 switch (addr_type) {
2536 case ADDR_LE_DEV_PUBLIC:
2537 return BDADDR_LE_PUBLIC;
2538
2539 default:
2540 /* Fallback to LE Random address type */
2541 return BDADDR_LE_RANDOM;
2542 }
2543
2544 default:
2545 /* Fallback to BR/EDR type */
2546 return BDADDR_BREDR;
2547 }
2548 }
2549
2550 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2551 u16 data_len)
2552 {
2553 struct mgmt_rp_get_connections *rp;
2554 struct hci_conn *c;
2555 size_t rp_len;
2556 int err;
2557 u16 i;
2558
2559 BT_DBG("");
2560
2561 hci_dev_lock(hdev);
2562
2563 if (!hdev_is_powered(hdev)) {
2564 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2565 MGMT_STATUS_NOT_POWERED);
2566 goto unlock;
2567 }
2568
2569 i = 0;
2570 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2571 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2572 i++;
2573 }
2574
2575 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2576 rp = kmalloc(rp_len, GFP_KERNEL);
2577 if (!rp) {
2578 err = -ENOMEM;
2579 goto unlock;
2580 }
2581
2582 i = 0;
2583 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2584 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2585 continue;
2586 bacpy(&rp->addr[i].bdaddr, &c->dst);
2587 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2588 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2589 continue;
2590 i++;
2591 }
2592
2593 rp->conn_count = cpu_to_le16(i);
2594
2595 /* Recalculate length in case of filtered SCO connections, etc */
2596 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2597
2598 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2599 rp_len);
2600
2601 kfree(rp);
2602
2603 unlock:
2604 hci_dev_unlock(hdev);
2605 return err;
2606 }
2607
2608 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2609 struct mgmt_cp_pin_code_neg_reply *cp)
2610 {
2611 struct pending_cmd *cmd;
2612 int err;
2613
2614 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2615 sizeof(*cp));
2616 if (!cmd)
2617 return -ENOMEM;
2618
2619 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2620 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2621 if (err < 0)
2622 mgmt_pending_remove(cmd);
2623
2624 return err;
2625 }
2626
/* PIN Code Reply command handler.  A BT_SECURITY_HIGH pairing requires
 * a full 16-byte PIN; anything shorter is answered toward the
 * controller with an automatic negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only exists on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject toward the controller first; only report
		 * INVALID_PARAMS to userspace if that send succeeded.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2686
2687 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2688 u16 len)
2689 {
2690 struct mgmt_cp_set_io_capability *cp = data;
2691
2692 BT_DBG("");
2693
2694 hci_dev_lock(hdev);
2695
2696 hdev->io_capability = cp->io_capability;
2697
2698 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2699 hdev->io_capability);
2700
2701 hci_dev_unlock(hdev);
2702
2703 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2704 0);
2705 }
2706
2707 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2708 {
2709 struct hci_dev *hdev = conn->hdev;
2710 struct pending_cmd *cmd;
2711
2712 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2713 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2714 continue;
2715
2716 if (cmd->user_data != conn)
2717 continue;
2718
2719 return cmd;
2720 }
2721
2722 return NULL;
2723 }
2724
/* Finish a Pair Device command with @status: send the command reply,
 * detach the pairing callbacks from the connection, drop the
 * connection reference held for the pairing and free the pending
 * command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2745
2746 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2747 {
2748 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2749 struct pending_cmd *cmd;
2750
2751 cmd = find_pairing(conn);
2752 if (cmd)
2753 pairing_complete(cmd, status);
2754 }
2755
2756 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2757 {
2758 struct pending_cmd *cmd;
2759
2760 BT_DBG("status %u", status);
2761
2762 cmd = find_pairing(conn);
2763 if (!cmd)
2764 BT_DBG("Unable to find a pending command");
2765 else
2766 pairing_complete(cmd, mgmt_status(status));
2767 }
2768
2769 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2770 {
2771 struct pending_cmd *cmd;
2772
2773 BT_DBG("status %u", status);
2774
2775 if (!status)
2776 return;
2777
2778 cmd = find_pairing(conn);
2779 if (!cmd)
2780 BT_DBG("Unable to find a pending command");
2781 else
2782 pairing_complete(cmd, mgmt_status(status));
2783 }
2784
/* Pair Device command handler: initiate an ACL or LE connection with
 * the authentication requirements for dedicated bonding.  The command
 * reply is deferred until pairing finishes (see pairing_complete()).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* Existing callbacks mean another pairing is already running */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and sufficiently secure: finish right away */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2889
/* Cancel Pair Device command handler: abort the pending Pair Device
 * command, provided the supplied address matches the pairing that is
 * actually in progress.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the connection being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Fail the original Pair Device command, then acknowledge the
	 * cancellation itself.
	 */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2931
/* Common handler for the user confirmation/passkey (negative) reply
 * commands.  LE responses are forwarded to SMP and completed
 * synchronously; BR/EDR responses are sent as the HCI command @hci_op
 * and completed when the controller answers.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3001
/* PIN Code Negative Reply command handler; thin wrapper around
 * user_pairing_resp() with no passkey.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3013
/* User Confirmation Reply command handler; validates the fixed command
 * length before delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3029
/* User Confirmation Negative Reply command handler; thin wrapper
 * around user_pairing_resp() with no passkey.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3041
/* User Passkey Reply command handler; forwards the passkey through
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3053
/* User Passkey Negative Reply command handler; thin wrapper around
 * user_pairing_resp() with no passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3065
3066 static void update_name(struct hci_request *req)
3067 {
3068 struct hci_dev *hdev = req->hdev;
3069 struct hci_cp_write_local_name cp;
3070
3071 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3072
3073 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3074 }
3075
3076 static void set_name_complete(struct hci_dev *hdev, u8 status)
3077 {
3078 struct mgmt_cp_set_local_name *cp;
3079 struct pending_cmd *cmd;
3080
3081 BT_DBG("status 0x%02x", status);
3082
3083 hci_dev_lock(hdev);
3084
3085 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3086 if (!cmd)
3087 goto unlock;
3088
3089 cp = cmd->param;
3090
3091 if (status)
3092 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3093 mgmt_status(status));
3094 else
3095 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3096 cp, sizeof(*cp));
3097
3098 mgmt_pending_remove(cmd);
3099
3100 unlock:
3101 hci_dev_unlock(hdev);
3102 }
3103
/* Set Local Name command handler: update the friendly name and short
 * name, pushing the new name to the controller (EIR and scan response
 * data) when powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is not part of the HCI request built below,
	 * so it can be stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the name and notify listeners */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3172
/* Read Local OOB Data command handler: ask the controller for its OOB
 * pairing data (the extended variant when Secure Connections is
 * enabled).  The reply is sent from the HCI completion handler.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one request may be outstanding at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3220
/* Add Remote OOB Data command handler.  Two wire formats exist: the
 * legacy one with a single hash/randomizer pair and the extended one
 * with both 192-bit and 256-bit pairs; they are distinguished purely
 * by the command length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3268
3269 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3270 void *data, u16 len)
3271 {
3272 struct mgmt_cp_remove_remote_oob_data *cp = data;
3273 u8 status;
3274 int err;
3275
3276 BT_DBG("%s", hdev->name);
3277
3278 hci_dev_lock(hdev);
3279
3280 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3281 if (err < 0)
3282 status = MGMT_STATUS_INVALID_PARAMS;
3283 else
3284 status = MGMT_STATUS_SUCCESS;
3285
3286 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3287 status, &cp->addr, sizeof(cp->addr));
3288
3289 hci_dev_unlock(hdev);
3290 return err;
3291 }
3292
3293 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3294 {
3295 struct pending_cmd *cmd;
3296 u8 type;
3297 int err;
3298
3299 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3300
3301 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3302 if (!cmd)
3303 return -ENOENT;
3304
3305 type = hdev->discovery.type;
3306
3307 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3308 &type, sizeof(type));
3309 mgmt_pending_remove(cmd);
3310
3311 return err;
3312 }
3313
/* HCI request completion handler for start_discovery(): on failure,
 * fail the pending command; on success, enter DISCOVERY_FINDING and
 * arm the LE scan timeout where applicable.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	/* LE scanning does not stop on its own, so schedule its
	 * disable; BR/EDR inquiry terminates by itself.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}
3347
/* Start Discovery command handler: build and run the HCI request for
 * the requested discovery type (BR/EDR inquiry, LE scan, or both
 * interleaved).  The command reply is deferred to
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and an active discovery both block a new one */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start from a clean cache so results are re-reported */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is not permitted here */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3496
3497 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3498 {
3499 struct pending_cmd *cmd;
3500 int err;
3501
3502 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3503 if (!cmd)
3504 return -ENOENT;
3505
3506 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3507 &hdev->discovery.type, sizeof(hdev->discovery.type));
3508 mgmt_pending_remove(cmd);
3509
3510 return err;
3511 }
3512
3513 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3514 {
3515 BT_DBG("status %d", status);
3516
3517 hci_dev_lock(hdev);
3518
3519 if (status) {
3520 mgmt_stop_discovery_failed(hdev, status);
3521 goto unlock;
3522 }
3523
3524 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3525
3526 unlock:
3527 hci_dev_unlock(hdev);
3528 }
3529
/* Stop Discovery command handler: abort the ongoing discovery of the
 * given type.  Depending on the current phase this cancels the
 * inquiry, disables LE scanning, or cancels a pending remote name
 * request; the reply is then deferred to stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match what discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means BR/EDR inquiry, else LE scan */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request in flight: nothing to cancel, so the
		 * command can be completed immediately.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3617
3618 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3619 u16 len)
3620 {
3621 struct mgmt_cp_confirm_name *cp = data;
3622 struct inquiry_entry *e;
3623 int err;
3624
3625 BT_DBG("%s", hdev->name);
3626
3627 hci_dev_lock(hdev);
3628
3629 if (!hci_discovery_active(hdev)) {
3630 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3631 MGMT_STATUS_FAILED);
3632 goto failed;
3633 }
3634
3635 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3636 if (!e) {
3637 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3638 MGMT_STATUS_INVALID_PARAMS);
3639 goto failed;
3640 }
3641
3642 if (cp->name_known) {
3643 e->name_state = NAME_KNOWN;
3644 list_del(&e->list);
3645 } else {
3646 e->name_state = NAME_NEEDED;
3647 hci_inquiry_cache_update_resolve(hdev, e);
3648 }
3649
3650 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3651 sizeof(cp->addr));
3652
3653 failed:
3654 hci_dev_unlock(hdev);
3655 return err;
3656 }
3657
3658 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3659 u16 len)
3660 {
3661 struct mgmt_cp_block_device *cp = data;
3662 u8 status;
3663 int err;
3664
3665 BT_DBG("%s", hdev->name);
3666
3667 if (!bdaddr_type_is_valid(cp->addr.type))
3668 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3669 MGMT_STATUS_INVALID_PARAMS,
3670 &cp->addr, sizeof(cp->addr));
3671
3672 hci_dev_lock(hdev);
3673
3674 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3675 if (err < 0)
3676 status = MGMT_STATUS_FAILED;
3677 else
3678 status = MGMT_STATUS_SUCCESS;
3679
3680 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3681 &cp->addr, sizeof(cp->addr));
3682
3683 hci_dev_unlock(hdev);
3684
3685 return err;
3686 }
3687
3688 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3689 u16 len)
3690 {
3691 struct mgmt_cp_unblock_device *cp = data;
3692 u8 status;
3693 int err;
3694
3695 BT_DBG("%s", hdev->name);
3696
3697 if (!bdaddr_type_is_valid(cp->addr.type))
3698 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3699 MGMT_STATUS_INVALID_PARAMS,
3700 &cp->addr, sizeof(cp->addr));
3701
3702 hci_dev_lock(hdev);
3703
3704 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3705 if (err < 0)
3706 status = MGMT_STATUS_INVALID_PARAMS;
3707 else
3708 status = MGMT_STATUS_SUCCESS;
3709
3710 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3711 &cp->addr, sizeof(cp->addr));
3712
3713 hci_dev_unlock(hdev);
3714
3715 return err;
3716 }
3717
3718 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3719 u16 len)
3720 {
3721 struct mgmt_cp_set_device_id *cp = data;
3722 struct hci_request req;
3723 int err;
3724 __u16 source;
3725
3726 BT_DBG("%s", hdev->name);
3727
3728 source = __le16_to_cpu(cp->source);
3729
3730 if (source > 0x0002)
3731 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3732 MGMT_STATUS_INVALID_PARAMS);
3733
3734 hci_dev_lock(hdev);
3735
3736 hdev->devid_source = source;
3737 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3738 hdev->devid_product = __le16_to_cpu(cp->product);
3739 hdev->devid_version = __le16_to_cpu(cp->version);
3740
3741 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3742
3743 hci_req_init(&req, hdev);
3744 update_eir(&req);
3745 hci_req_run(&req, NULL);
3746
3747 hci_dev_unlock(hdev);
3748
3749 return err;
3750 }
3751
3752 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3753 {
3754 struct cmd_lookup match = { NULL, hdev };
3755
3756 if (status) {
3757 u8 mgmt_err = mgmt_status(status);
3758
3759 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3760 cmd_status_rsp, &mgmt_err);
3761 return;
3762 }
3763
3764 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3765 &match);
3766
3767 new_settings(hdev, match.sk);
3768
3769 if (match.sk)
3770 sock_put(match.sk);
3771 }
3772
/* Handler for MGMT_OP_SET_ADVERTISING: toggle LE advertising. When no
 * HCI traffic is needed the flag is toggled and the response is sent
 * directly; otherwise the command completes asynchronously through
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support on this controller. */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* The mode parameter is a strict boolean. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast new settings only if the flag really changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Do not race with a SET_ADVERTISING or SET_LE already in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3850
3851 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3852 void *data, u16 len)
3853 {
3854 struct mgmt_cp_set_static_address *cp = data;
3855 int err;
3856
3857 BT_DBG("%s", hdev->name);
3858
3859 if (!lmp_le_capable(hdev))
3860 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3861 MGMT_STATUS_NOT_SUPPORTED);
3862
3863 if (hdev_is_powered(hdev))
3864 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3865 MGMT_STATUS_REJECTED);
3866
3867 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3868 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3869 return cmd_status(sk, hdev->id,
3870 MGMT_OP_SET_STATIC_ADDRESS,
3871 MGMT_STATUS_INVALID_PARAMS);
3872
3873 /* Two most significant bits shall be set */
3874 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3875 return cmd_status(sk, hdev->id,
3876 MGMT_OP_SET_STATIC_ADDRESS,
3877 MGMT_STATUS_INVALID_PARAMS);
3878 }
3879
3880 hci_dev_lock(hdev);
3881
3882 bacpy(&hdev->static_addr, &cp->bdaddr);
3883
3884 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3885
3886 hci_dev_unlock(hdev);
3887
3888 return err;
3889 }
3890
3891 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3892 void *data, u16 len)
3893 {
3894 struct mgmt_cp_set_scan_params *cp = data;
3895 __u16 interval, window;
3896 int err;
3897
3898 BT_DBG("%s", hdev->name);
3899
3900 if (!lmp_le_capable(hdev))
3901 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3902 MGMT_STATUS_NOT_SUPPORTED);
3903
3904 interval = __le16_to_cpu(cp->interval);
3905
3906 if (interval < 0x0004 || interval > 0x4000)
3907 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3908 MGMT_STATUS_INVALID_PARAMS);
3909
3910 window = __le16_to_cpu(cp->window);
3911
3912 if (window < 0x0004 || window > 0x4000)
3913 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3914 MGMT_STATUS_INVALID_PARAMS);
3915
3916 if (window > interval)
3917 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3918 MGMT_STATUS_INVALID_PARAMS);
3919
3920 hci_dev_lock(hdev);
3921
3922 hdev->le_scan_interval = interval;
3923 hdev->le_scan_window = window;
3924
3925 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3926
3927 hci_dev_unlock(hdev);
3928
3929 return err;
3930 }
3931
/* Completion callback for the Set Fast Connectable HCI request: update
 * the HCI_FAST_CONNECTABLE flag according to the pending command's
 * parameter and answer the originating socket.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* Nothing to do if the pending command is already gone. */
	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* Mirror the requested mode into the device flags. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3964
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: adjust the page scan
 * parameters for faster connection establishment. Requires BR/EDR to
 * be enabled, a controller of at least Bluetooth 1.2, power on and
 * connectable mode. Completes via fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The mode parameter is a strict boolean. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one fast-connectable change may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just confirm the settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4029
/* Queue HCI commands that (re-)enable BR/EDR page and/or inquiry scan
 * according to the HCI_CONNECTABLE and HCI_DISCOVERABLE flags. If
 * neither flag is set, no scan-enable command is queued.
 */
static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
4049
/* Completion callback for the Set BR/EDR HCI request: on failure roll
 * back the HCI_BREDR_ENABLED flag (set_bredr() flipped it early), on
 * success confirm the new settings to the originating socket.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4081
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. While powered on only enabling
 * is allowed; the command then completes via set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, so BR/EDR can only be toggled while
	 * HCI_LE_ENABLED is set.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested mode already active: just confirm the settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* While powered off the flags can be changed directly;
		 * disabling BR/EDR also clears all BR/EDR-only modes.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4171
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections.
 * val may be 0x00 (off), 0x01 (on) or 0x02 (SC only mode). When the
 * controller is powered, the mode is written via
 * HCI_OP_WRITE_SC_SUPPORT and completed asynchronously.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* SC needs controller support, unless explicitly forced. */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just toggle the flags and report back. */
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested combination already active: confirm the settings. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): SC_ONLY is flipped before the HCI command
	 * completes; a failing command leaves this flag as requested.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4259
4260 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4261 void *data, u16 len)
4262 {
4263 struct mgmt_mode *cp = data;
4264 bool changed;
4265 int err;
4266
4267 BT_DBG("request for %s", hdev->name);
4268
4269 if (cp->val != 0x00 && cp->val != 0x01)
4270 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4271 MGMT_STATUS_INVALID_PARAMS);
4272
4273 hci_dev_lock(hdev);
4274
4275 if (cp->val)
4276 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4277 else
4278 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4279
4280 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4281 if (err < 0)
4282 goto unlock;
4283
4284 if (changed)
4285 err = new_settings(hdev, sk);
4286
4287 unlock:
4288 hci_dev_unlock(hdev);
4289 return err;
4290 }
4291
/* Handler for MGMT_OP_SET_PRIVACY: enable or disable LE privacy and
 * store (or clear) the local Identity Resolving Key. Only allowed
 * while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy can only be reconfigured while powered off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		/* Store the supplied IRK and force generation of a new
		 * resolvable private address via HCI_RPA_EXPIRED.
		 */
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4341
4342 static bool irk_is_valid(struct mgmt_irk_info *irk)
4343 {
4344 switch (irk->addr.type) {
4345 case BDADDR_LE_PUBLIC:
4346 return true;
4347
4348 case BDADDR_LE_RANDOM:
4349 /* Two most significant bits shall be set */
4350 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4351 return false;
4352 return true;
4353 }
4354
4355 return false;
4356 }
4357
4358 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4359 u16 len)
4360 {
4361 struct mgmt_cp_load_irks *cp = cp_data;
4362 u16 irk_count, expected_len;
4363 int i, err;
4364
4365 BT_DBG("request for %s", hdev->name);
4366
4367 if (!lmp_le_capable(hdev))
4368 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4369 MGMT_STATUS_NOT_SUPPORTED);
4370
4371 irk_count = __le16_to_cpu(cp->irk_count);
4372
4373 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4374 if (expected_len != len) {
4375 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4376 len, expected_len);
4377 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4378 MGMT_STATUS_INVALID_PARAMS);
4379 }
4380
4381 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4382
4383 for (i = 0; i < irk_count; i++) {
4384 struct mgmt_irk_info *key = &cp->irks[i];
4385
4386 if (!irk_is_valid(key))
4387 return cmd_status(sk, hdev->id,
4388 MGMT_OP_LOAD_IRKS,
4389 MGMT_STATUS_INVALID_PARAMS);
4390 }
4391
4392 hci_dev_lock(hdev);
4393
4394 hci_smp_irks_clear(hdev);
4395
4396 for (i = 0; i < irk_count; i++) {
4397 struct mgmt_irk_info *irk = &cp->irks[i];
4398 u8 addr_type;
4399
4400 if (irk->addr.type == BDADDR_LE_PUBLIC)
4401 addr_type = ADDR_LE_DEV_PUBLIC;
4402 else
4403 addr_type = ADDR_LE_DEV_RANDOM;
4404
4405 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4406 BDADDR_ANY);
4407 }
4408
4409 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4410
4411 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4412
4413 hci_dev_unlock(hdev);
4414
4415 return err;
4416 }
4417
4418 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4419 {
4420 if (key->master != 0x00 && key->master != 0x01)
4421 return false;
4422
4423 switch (key->addr.type) {
4424 case BDADDR_LE_PUBLIC:
4425 return true;
4426
4427 case BDADDR_LE_RANDOM:
4428 /* Two most significant bits shall be set */
4429 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4430 return false;
4431 return true;
4432 }
4433
4434 return false;
4435 }
4436
4437 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4438 void *cp_data, u16 len)
4439 {
4440 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4441 u16 key_count, expected_len;
4442 int i, err;
4443
4444 BT_DBG("request for %s", hdev->name);
4445
4446 if (!lmp_le_capable(hdev))
4447 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4448 MGMT_STATUS_NOT_SUPPORTED);
4449
4450 key_count = __le16_to_cpu(cp->key_count);
4451
4452 expected_len = sizeof(*cp) + key_count *
4453 sizeof(struct mgmt_ltk_info);
4454 if (expected_len != len) {
4455 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4456 len, expected_len);
4457 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4458 MGMT_STATUS_INVALID_PARAMS);
4459 }
4460
4461 BT_DBG("%s key_count %u", hdev->name, key_count);
4462
4463 for (i = 0; i < key_count; i++) {
4464 struct mgmt_ltk_info *key = &cp->keys[i];
4465
4466 if (!ltk_is_valid(key))
4467 return cmd_status(sk, hdev->id,
4468 MGMT_OP_LOAD_LONG_TERM_KEYS,
4469 MGMT_STATUS_INVALID_PARAMS);
4470 }
4471
4472 hci_dev_lock(hdev);
4473
4474 hci_smp_ltks_clear(hdev);
4475
4476 for (i = 0; i < key_count; i++) {
4477 struct mgmt_ltk_info *key = &cp->keys[i];
4478 u8 type, addr_type;
4479
4480 if (key->addr.type == BDADDR_LE_PUBLIC)
4481 addr_type = ADDR_LE_DEV_PUBLIC;
4482 else
4483 addr_type = ADDR_LE_DEV_RANDOM;
4484
4485 if (key->master)
4486 type = HCI_SMP_LTK;
4487 else
4488 type = HCI_SMP_LTK_SLAVE;
4489
4490 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4491 key->type, key->val, key->enc_size, key->ediv,
4492 key->rand);
4493 }
4494
4495 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4496 NULL, 0);
4497
4498 hci_dev_unlock(hdev);
4499
4500 return err;
4501 }
4502
/* Dispatch table for incoming mgmt commands, indexed by opcode.
 * data_len is the expected parameter size; for var_len commands it is
 * the minimum size (payload may be longer), for fixed-size commands
 * the length must match exactly. Both rules are enforced in
 * mgmt_control() before the handler runs.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4559
4560
/* Entry point for mgmt messages received from a control socket. Copies
 * the message into kernel memory, validates the header, index and
 * parameter length, then dispatches to the handler from mgmt_handlers.
 * Returns msglen on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must account for all trailing bytes. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup or claimed by a user channel
		 * are not visible over the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below READ_INFO are global and must come without an
	 * index; all others require a valid controller index.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands may exceed data_len; fixed-length
	 * commands must match it exactly.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	/* Parameters start right after the fixed header. */
	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4653
4654 void mgmt_index_added(struct hci_dev *hdev)
4655 {
4656 if (hdev->dev_type != HCI_BREDR)
4657 return;
4658
4659 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4660 }
4661
4662 void mgmt_index_removed(struct hci_dev *hdev)
4663 {
4664 u8 status = MGMT_STATUS_INVALID_INDEX;
4665
4666 if (hdev->dev_type != HCI_BREDR)
4667 return;
4668
4669 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4670
4671 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4672 }
4673
4674 /* This function requires the caller holds hdev->lock */
4675 static void restart_le_auto_conns(struct hci_dev *hdev)
4676 {
4677 struct hci_conn_params *p;
4678
4679 list_for_each_entry(p, &hdev->le_conn_params, list) {
4680 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4681 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4682 }
4683 }
4684
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(): restart LE auto-connections, answer pending
 * SET_POWERED commands and broadcast the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference taken by settings_rsp, outside the
	 * device lock.
	 */
	if (match.sk)
		sock_put(match.sk);
}
4704
/* Build and run the HCI request that brings a freshly powered
 * controller in sync with the mgmt settings: SSP mode, LE host
 * support, advertising data, link security and BR/EDR scan, class,
 * name and EIR. Returns the hci_req_run() result (0 when the request
 * was queued, negative when nothing was queued or queueing failed);
 * completion is reported via powered_complete().
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if requested but not yet set. */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4764
/* Notify the mgmt layer of a controller power state change. On power
 * on, the settings sync is delegated to powered_update_hci() and the
 * notification is deferred to powered_complete() when a request was
 * queued. On power off, all pending commands are failed and a zero
 * class-of-device event is emitted when needed.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* A queued request (return 0) means powered_complete()
		 * will handle the responses and settings broadcast.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering off: answer SET_POWERED commands with the new
	 * settings and fail every other pending command.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4799
4800 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4801 {
4802 struct pending_cmd *cmd;
4803 u8 status;
4804
4805 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4806 if (!cmd)
4807 return;
4808
4809 if (err == -ERFKILL)
4810 status = MGMT_STATUS_RFKILLED;
4811 else
4812 status = MGMT_STATUS_FAILED;
4813
4814 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4815
4816 mgmt_pending_remove(cmd);
4817 }
4818
/* Timer callback for the discoverable timeout: clear the discoverable
 * flags, restore plain page-scan on BR/EDR and refresh class and
 * advertising data. Runs with hdev locked for the whole sequence.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Refresh class and advertising data so any limited
	 * discoverable indications are removed.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4849
/* Sync the HCI_DISCOVERABLE mgmt flag with a controller scan mode
 * change; when the flag actually flipped, refresh advertising data
 * and notify user space of the new settings.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable can't outlive general discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4886
4887 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4888 {
4889 bool changed;
4890
4891 /* Nothing needed here if there's a pending command since that
4892 * commands request completion callback takes care of everything
4893 * necessary.
4894 */
4895 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4896 return;
4897
4898 /* Powering off may clear the scan mode - don't let that interfere */
4899 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4900 return;
4901
4902 if (connectable)
4903 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4904 else
4905 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4906
4907 if (changed)
4908 new_settings(hdev, NULL);
4909 }
4910
4911 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4912 {
4913 /* Powering off may stop advertising - don't let that interfere */
4914 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4915 return;
4916
4917 if (advertising)
4918 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4919 else
4920 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4921 }
4922
4923 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4924 {
4925 u8 mgmt_err = mgmt_status(status);
4926
4927 if (scan & SCAN_PAGE)
4928 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4929 cmd_status_rsp, &mgmt_err);
4930
4931 if (scan & SCAN_INQUIRY)
4932 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4933 cmd_status_rsp, &mgmt_err);
4934 }
4935
4936 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4937 bool persistent)
4938 {
4939 struct mgmt_ev_new_link_key ev;
4940
4941 memset(&ev, 0, sizeof(ev));
4942
4943 ev.store_hint = persistent;
4944 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4945 ev.key.addr.type = BDADDR_BREDR;
4946 ev.key.type = key->type;
4947 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4948 ev.key.pin_len = key->pin_len;
4949
4950 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4951 }
4952
/* Emit a New Long Term Key event to user space; the store hint tells
 * user space whether the key is worth persisting.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* NOTE(review): HCI_SMP_LTK appears to mark the master-role
	 * key - confirm against smp.h before relying on this.
	 */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4990
/* Emit a New Identity Resolving Key event to user space, including the
 * current resolvable private address of the device (if any).
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5020
/* Append one EIR structure (length octet, type octet, payload) to the
 * buffer at offset eir_len and return the new total length. The
 * caller must guarantee that eir has room for data_len + 2 more bytes.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* The length octet covers the type octet plus the payload */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);

	return eir_len + data_len;
}
5031
/* Emit a Device Connected event, packing the remote name and class of
 * device (when available) as EIR data after the fixed event fields.
 *
 * NOTE(review): buf is 512 bytes with no explicit bound check here;
 * callers appear to pass name_len bounded by the HCI name length -
 * confirm before feeding larger payloads through this path.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5058
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command and hand its socket (with a held reference) back to the
 * caller through @data so the disconnect event can skip that socket.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller is responsible for the matching sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5076
5077 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5078 {
5079 struct hci_dev *hdev = data;
5080 struct mgmt_cp_unpair_device *cp = cmd->param;
5081 struct mgmt_rp_unpair_device rp;
5082
5083 memset(&rp, 0, sizeof(rp));
5084 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5085 rp.addr.type = cp->addr.type;
5086
5087 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5088
5089 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5090
5091 mgmt_pending_remove(cmd);
5092 }
5093
/* Handle a remote device disconnection: possibly finish a pending
 * power-off, answer pending Disconnect commands and emit the Device
 * Disconnected event (skipping the socket that requested it).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1)
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Don't emit events for connections unknown to user space */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() holds a reference on sk when it sets it */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5133
/* Handle a failed disconnect attempt: flush pending Unpair Device
 * commands and, when the failure matches the pending Disconnect
 * command's address, complete it with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond when the failure is for the pending address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5165
5166 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5167 u8 addr_type, u8 status)
5168 {
5169 struct mgmt_ev_connect_failed ev;
5170
5171 bacpy(&ev.addr.bdaddr, bdaddr);
5172 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5173 ev.status = mgmt_status(status);
5174
5175 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5176 }
5177
5178 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5179 {
5180 struct mgmt_ev_pin_code_request ev;
5181
5182 bacpy(&ev.addr.bdaddr, bdaddr);
5183 ev.addr.type = BDADDR_BREDR;
5184 ev.secure = secure;
5185
5186 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5187 }
5188
5189 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5190 u8 status)
5191 {
5192 struct pending_cmd *cmd;
5193 struct mgmt_rp_pin_code_reply rp;
5194
5195 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5196 if (!cmd)
5197 return;
5198
5199 bacpy(&rp.addr.bdaddr, bdaddr);
5200 rp.addr.type = BDADDR_BREDR;
5201
5202 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5203 mgmt_status(status), &rp, sizeof(rp));
5204
5205 mgmt_pending_remove(cmd);
5206 }
5207
5208 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5209 u8 status)
5210 {
5211 struct pending_cmd *cmd;
5212 struct mgmt_rp_pin_code_reply rp;
5213
5214 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5215 if (!cmd)
5216 return;
5217
5218 bacpy(&rp.addr.bdaddr, bdaddr);
5219 rp.addr.type = BDADDR_BREDR;
5220
5221 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5222 mgmt_status(status), &rp, sizeof(rp));
5223
5224 mgmt_pending_remove(cmd);
5225 }
5226
5227 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5228 u8 link_type, u8 addr_type, __le32 value,
5229 u8 confirm_hint)
5230 {
5231 struct mgmt_ev_user_confirm_request ev;
5232
5233 BT_DBG("%s", hdev->name);
5234
5235 bacpy(&ev.addr.bdaddr, bdaddr);
5236 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5237 ev.confirm_hint = confirm_hint;
5238 ev.value = value;
5239
5240 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5241 NULL);
5242 }
5243
5244 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5245 u8 link_type, u8 addr_type)
5246 {
5247 struct mgmt_ev_user_passkey_request ev;
5248
5249 BT_DBG("%s", hdev->name);
5250
5251 bacpy(&ev.addr.bdaddr, bdaddr);
5252 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5253
5254 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5255 NULL);
5256 }
5257
5258 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5259 u8 link_type, u8 addr_type, u8 status,
5260 u8 opcode)
5261 {
5262 struct pending_cmd *cmd;
5263 struct mgmt_rp_user_confirm_reply rp;
5264 int err;
5265
5266 cmd = mgmt_pending_find(opcode, hdev);
5267 if (!cmd)
5268 return -ENOENT;
5269
5270 bacpy(&rp.addr.bdaddr, bdaddr);
5271 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5272 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5273 &rp, sizeof(rp));
5274
5275 mgmt_pending_remove(cmd);
5276
5277 return err;
5278 }
5279
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5286
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5294
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5301
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5309
5310 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5311 u8 link_type, u8 addr_type, u32 passkey,
5312 u8 entered)
5313 {
5314 struct mgmt_ev_passkey_notify ev;
5315
5316 BT_DBG("%s", hdev->name);
5317
5318 bacpy(&ev.addr.bdaddr, bdaddr);
5319 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5320 ev.passkey = __cpu_to_le32(passkey);
5321 ev.entered = entered;
5322
5323 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5324 }
5325
5326 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5327 u8 addr_type, u8 status)
5328 {
5329 struct mgmt_ev_auth_failed ev;
5330
5331 bacpy(&ev.addr.bdaddr, bdaddr);
5332 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5333 ev.status = mgmt_status(status);
5334
5335 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5336 }
5337
/* Handle completion of a Write Auth Enable command: on failure just
 * fail pending Set Link Security commands; on success sync the
 * HCI_LINK_SECURITY flag with the controller state and notify user
 * space if it changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp() held a reference via match.sk */
	if (match.sk)
		sock_put(match.sk);
}
5366
5367 static void clear_eir(struct hci_request *req)
5368 {
5369 struct hci_dev *hdev = req->hdev;
5370 struct hci_cp_write_eir cp;
5371
5372 if (!lmp_ext_inq_capable(hdev))
5373 return;
5374
5375 memset(hdev->eir, 0, sizeof(hdev->eir));
5376
5377 memset(&cp, 0, sizeof(cp));
5378
5379 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5380 }
5381
/* Handle completion of a Write SSP Mode command. On failure roll back
 * the HCI_SSP_ENABLED flag (and the dependent High Speed flag) and
 * fail the pending Set SSP commands; on success sync the flags,
 * respond to pending commands and rewrite or clear the EIR data so it
 * matches the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: undo the optimistic flag update */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also takes down High Speed support;
		 * report a change if either flag was set.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Keep the EIR data consistent with the new SSP state */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5430
/* Handle completion of a Secure Connections support change. On failure
 * roll back the SC flags and fail pending Set Secure Connections
 * commands; on success sync the flags, respond to pending commands and
 * notify user space if the setting changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			/* Enabling failed: undo the optimistic update */
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* SC-only mode can't survive SC being disabled */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5467
5468 static void sk_lookup(struct pending_cmd *cmd, void *data)
5469 {
5470 struct cmd_lookup *match = data;
5471
5472 if (match->sk == NULL) {
5473 match->sk = cmd->sk;
5474 sock_hold(match->sk);
5475 }
5476 }
5477
/* Handle completion of a class of device update: pick a socket from
 * any of the commands that may have triggered it (so the event can be
 * attributed) and emit Class Of Dev Changed on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* sk_lookup() held a reference on the socket it picked */
	if (match.sk)
		sock_put(match.sk);
}
5494
/* Handle completion of a local name update. When no Set Local Name
 * command is pending the change came from HCI directly, so store the
 * name; during a power-on sequence no event is emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5521
/* Handle completion of a Read Local OOB Data command: forward the
 * hash/randomizer values to the pending mgmt command. When Secure
 * Connections is enabled and 256-bit values are present, the extended
 * (192+256 bit) response format is used instead.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5568
/* Emit a Device Found event for an inquiry/scan result. The reported
 * address is replaced with the identity address when a matching IRK
 * exists, and the class of device is appended to the EIR data unless
 * one is already present.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only delivered while discovery is active */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5614
5615 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5616 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5617 {
5618 struct mgmt_ev_device_found *ev;
5619 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5620 u16 eir_len;
5621
5622 ev = (struct mgmt_ev_device_found *) buf;
5623
5624 memset(buf, 0, sizeof(buf));
5625
5626 bacpy(&ev->addr.bdaddr, bdaddr);
5627 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5628 ev->rssi = rssi;
5629
5630 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5631 name_len);
5632
5633 ev->eir_len = cpu_to_le16(eir_len);
5634
5635 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5636 }
5637
/* Report a discovery state change: complete the pending Start/Stop
 * Discovery command (if any) and emit the Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5664
5665 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5666 {
5667 struct pending_cmd *cmd;
5668 struct mgmt_ev_device_blocked ev;
5669
5670 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5671
5672 bacpy(&ev.addr.bdaddr, bdaddr);
5673 ev.addr.type = type;
5674
5675 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5676 cmd ? cmd->sk : NULL);
5677 }
5678
5679 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5680 {
5681 struct pending_cmd *cmd;
5682 struct mgmt_ev_device_unblocked ev;
5683
5684 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5685
5686 bacpy(&ev.addr.bdaddr, bdaddr);
5687 ev.addr.type = type;
5688
5689 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5690 cmd ? cmd->sk : NULL);
5691 }
5692
5693 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5694 {
5695 BT_DBG("%s status %u", hdev->name, status);
5696
5697 /* Clear the advertising mgmt setting if we failed to re-enable it */
5698 if (status) {
5699 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5700 new_settings(hdev, NULL);
5701 }
5702 }
5703
/* Re-enable advertising after the last LE connection dropped, if the
 * mgmt advertising setting is still on. If the request can't even be
 * queued, clear the setting and tell user space.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising stays off while any LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}