mirror_ubuntu-artful-kernel.git: net/bluetooth/mgmt.c
Bluetooth: Add support for local OOB data with Secure Connections
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 4
38
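/* Command and event tables exposed to user space: read_commands() below
 * copies both lists verbatim into the Read Commands reply, so they
 * describe what a management client may expect this kernel to support.
 */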
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
41 MGMT_OP_READ_INFO,
42 MGMT_OP_SET_POWERED,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
46 MGMT_OP_SET_PAIRABLE,
47 MGMT_OP_SET_LINK_SECURITY,
48 MGMT_OP_SET_SSP,
49 MGMT_OP_SET_HS,
50 MGMT_OP_SET_LE,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
53 MGMT_OP_ADD_UUID,
54 MGMT_OP_REMOVE_UUID,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
57 MGMT_OP_DISCONNECT,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
62 MGMT_OP_PAIR_DEVICE,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
74 MGMT_OP_CONFIRM_NAME,
75 MGMT_OP_BLOCK_DEVICE,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
79 MGMT_OP_SET_BREDR,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
83 };
84
85 static const u16 mgmt_events[] = {
86 MGMT_EV_CONTROLLER_ERROR,
87 MGMT_EV_INDEX_ADDED,
88 MGMT_EV_INDEX_REMOVED,
89 MGMT_EV_NEW_SETTINGS,
90 MGMT_EV_CLASS_OF_DEV_CHANGED,
91 MGMT_EV_LOCAL_NAME_CHANGED,
92 MGMT_EV_NEW_LINK_KEY,
93 MGMT_EV_NEW_LONG_TERM_KEY,
94 MGMT_EV_DEVICE_CONNECTED,
95 MGMT_EV_DEVICE_DISCONNECTED,
96 MGMT_EV_CONNECT_FAILED,
97 MGMT_EV_PIN_CODE_REQUEST,
98 MGMT_EV_USER_CONFIRM_REQUEST,
99 MGMT_EV_USER_PASSKEY_REQUEST,
100 MGMT_EV_AUTH_FAILED,
101 MGMT_EV_DEVICE_FOUND,
102 MGMT_EV_DISCOVERING,
103 MGMT_EV_DEVICE_BLOCKED,
104 MGMT_EV_DEVICE_UNBLOCKED,
105 MGMT_EV_DEVICE_UNPAIRED,
106 MGMT_EV_PASSKEY_NOTIFY,
107 };
108
109 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
110
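/* From mgmt's point of view a controller only counts as powered when the
 * HCI device is up and not merely in the HCI_AUTO_OFF grace period that
 * precedes an explicit Set Powered command.
 */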
111 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
112 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
113
114 struct pending_cmd {
115 struct list_head list;
116 u16 opcode;
117 int index;
118 void *param;
119 struct sock *sk;
120 void *user_data;
121 };
122
123 /* HCI to MGMT error code conversion table */
124 static u8 mgmt_status_table[] = {
125 MGMT_STATUS_SUCCESS,
126 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
127 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
128 MGMT_STATUS_FAILED, /* Hardware Failure */
129 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
130 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
131 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
132 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
133 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
134 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
135 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
136 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
137 MGMT_STATUS_BUSY, /* Command Disallowed */
138 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
139 MGMT_STATUS_REJECTED, /* Rejected Security */
140 MGMT_STATUS_REJECTED, /* Rejected Personal */
141 MGMT_STATUS_TIMEOUT, /* Host Timeout */
142 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
143 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
144 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
145 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
146 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
147 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
148 MGMT_STATUS_BUSY, /* Repeated Attempts */
149 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
150 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
151 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
152 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
153 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
154 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
155 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
156 MGMT_STATUS_FAILED, /* Unspecified Error */
157 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
158 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
159 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
160 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
161 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
162 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
163 MGMT_STATUS_FAILED, /* Unit Link Key Used */
164 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
165 MGMT_STATUS_TIMEOUT, /* Instant Passed */
166 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
167 MGMT_STATUS_FAILED, /* Transaction Collision */
168 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
169 MGMT_STATUS_REJECTED, /* QoS Rejected */
170 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
171 MGMT_STATUS_REJECTED, /* Insufficient Security */
172 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
173 MGMT_STATUS_BUSY, /* Role Switch Pending */
174 MGMT_STATUS_FAILED, /* Slot Violation */
175 MGMT_STATUS_FAILED, /* Role Switch Failed */
176 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
177 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
178 MGMT_STATUS_BUSY, /* Host Busy Pairing */
179 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
180 MGMT_STATUS_BUSY, /* Controller Busy */
181 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
182 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
183 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
184 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
185 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
186 };
187
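/* The table above is indexed directly by the HCI status code, so its
 * order must match the status numbering in the Bluetooth core
 * specification; anything beyond the table falls back to
 * MGMT_STATUS_FAILED.
 */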
188 static u8 mgmt_status(u8 hci_status)
189 {
190 if (hci_status < ARRAY_SIZE(mgmt_status_table))
191 return mgmt_status_table[hci_status];
192
193 return MGMT_STATUS_FAILED;
194 }
195
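/* Replies to management commands are delivered as events queued straight
 * onto the requesting socket. Both cmd_status() and cmd_complete() build
 * the same framing:
 *
 *	struct mgmt_hdr { opcode, index, len }   (all little endian)
 *	followed by the event payload, e.g. for a status reply:
 *	struct mgmt_ev_cmd_status { opcode of the original command, status }
 *
 * cmd_complete() additionally appends rp_len bytes of command-specific
 * return parameters after the mgmt_ev_cmd_complete header.
 */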
196 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
197 {
198 struct sk_buff *skb;
199 struct mgmt_hdr *hdr;
200 struct mgmt_ev_cmd_status *ev;
201 int err;
202
203 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
204
205 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
206 if (!skb)
207 return -ENOMEM;
208
209 hdr = (void *) skb_put(skb, sizeof(*hdr));
210
211 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
212 hdr->index = cpu_to_le16(index);
213 hdr->len = cpu_to_le16(sizeof(*ev));
214
215 ev = (void *) skb_put(skb, sizeof(*ev));
216 ev->status = status;
217 ev->opcode = cpu_to_le16(cmd);
218
219 err = sock_queue_rcv_skb(sk, skb);
220 if (err < 0)
221 kfree_skb(skb);
222
223 return err;
224 }
225
226 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
227 void *rp, size_t rp_len)
228 {
229 struct sk_buff *skb;
230 struct mgmt_hdr *hdr;
231 struct mgmt_ev_cmd_complete *ev;
232 int err;
233
234 BT_DBG("sock %p", sk);
235
236 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
237 if (!skb)
238 return -ENOMEM;
239
240 hdr = (void *) skb_put(skb, sizeof(*hdr));
241
242 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
243 hdr->index = cpu_to_le16(index);
244 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
245
246 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
247 ev->opcode = cpu_to_le16(cmd);
248 ev->status = status;
249
250 if (rp)
251 memcpy(ev->data, rp, rp_len);
252
253 err = sock_queue_rcv_skb(sk, skb);
254 if (err < 0)
255 kfree_skb(skb);
256
257 return err;
258 }
259
260 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
261 u16 data_len)
262 {
263 struct mgmt_rp_read_version rp;
264
265 BT_DBG("sock %p", sk);
266
267 rp.version = MGMT_VERSION;
268 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269
270 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
271 sizeof(rp));
272 }
273
274 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
275 u16 data_len)
276 {
277 struct mgmt_rp_read_commands *rp;
278 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
279 const u16 num_events = ARRAY_SIZE(mgmt_events);
280 __le16 *opcode;
281 size_t rp_size;
282 int i, err;
283
284 BT_DBG("sock %p", sk);
285
286 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
287
288 rp = kmalloc(rp_size, GFP_KERNEL);
289 if (!rp)
290 return -ENOMEM;
291
292 rp->num_commands = __constant_cpu_to_le16(num_commands);
293 rp->num_events = __constant_cpu_to_le16(num_events);
294
295 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
296 put_unaligned_le16(mgmt_commands[i], opcode);
297
298 for (i = 0; i < num_events; i++, opcode++)
299 put_unaligned_le16(mgmt_events[i], opcode);
300
301 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
302 rp_size);
303 kfree(rp);
304
305 return err;
306 }
307
308 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
309 u16 data_len)
310 {
311 struct mgmt_rp_read_index_list *rp;
312 struct hci_dev *d;
313 size_t rp_len;
314 u16 count;
315 int err;
316
317 BT_DBG("sock %p", sk);
318
319 read_lock(&hci_dev_list_lock);
320
321 count = 0;
322 list_for_each_entry(d, &hci_dev_list, list) {
323 if (d->dev_type == HCI_BREDR)
324 count++;
325 }
326
327 rp_len = sizeof(*rp) + (2 * count);
328 rp = kmalloc(rp_len, GFP_ATOMIC);
329 if (!rp) {
330 read_unlock(&hci_dev_list_lock);
331 return -ENOMEM;
332 }
333
334 count = 0;
335 list_for_each_entry(d, &hci_dev_list, list) {
336 if (test_bit(HCI_SETUP, &d->dev_flags))
337 continue;
338
339 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
340 continue;
341
342 if (d->dev_type == HCI_BREDR) {
343 rp->index[count++] = cpu_to_le16(d->id);
344 BT_DBG("Added hci%u", d->id);
345 }
346 }
347
348 rp->num_controllers = cpu_to_le16(count);
349 rp_len = sizeof(*rp) + (2 * count);
350
351 read_unlock(&hci_dev_list_lock);
352
353 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
354 rp_len);
355
356 kfree(rp);
357
358 return err;
359 }
360
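/* Supported settings are derived from the controller's LMP feature bits
 * (BR/EDR, SSP, Secure Connections, LE), whereas current settings mirror
 * the hdev->dev_flags state bits; the two are reported side by side in
 * Read Controller Information.
 */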
361 static u32 get_supported_settings(struct hci_dev *hdev)
362 {
363 u32 settings = 0;
364
365 settings |= MGMT_SETTING_POWERED;
366 settings |= MGMT_SETTING_PAIRABLE;
367
368 if (lmp_bredr_capable(hdev)) {
369 settings |= MGMT_SETTING_CONNECTABLE;
370 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
371 settings |= MGMT_SETTING_FAST_CONNECTABLE;
372 settings |= MGMT_SETTING_DISCOVERABLE;
373 settings |= MGMT_SETTING_BREDR;
374 settings |= MGMT_SETTING_LINK_SECURITY;
375
376 if (lmp_ssp_capable(hdev)) {
377 settings |= MGMT_SETTING_SSP;
378 settings |= MGMT_SETTING_HS;
379 }
380
381 if (lmp_sc_capable(hdev))
382 settings |= MGMT_SETTING_SECURE_CONN;
383 }
384
385 if (lmp_le_capable(hdev)) {
386 settings |= MGMT_SETTING_LE;
387 settings |= MGMT_SETTING_ADVERTISING;
388 }
389
390 return settings;
391 }
392
393 static u32 get_current_settings(struct hci_dev *hdev)
394 {
395 u32 settings = 0;
396
397 if (hdev_is_powered(hdev))
398 settings |= MGMT_SETTING_POWERED;
399
400 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_CONNECTABLE;
402
403 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_FAST_CONNECTABLE;
405
406 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_DISCOVERABLE;
408
409 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_PAIRABLE;
411
412 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
413 settings |= MGMT_SETTING_BREDR;
414
415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LE;
417
418 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
419 settings |= MGMT_SETTING_LINK_SECURITY;
420
421 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_SSP;
423
424 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_HS;
426
427 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
428 settings |= MGMT_SETTING_ADVERTISING;
429
430 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_SECURE_CONN;
432
433 return settings;
434 }
435
436 #define PNP_INFO_SVCLASS_ID 0x1200
437
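/* The create_uuid*_list() helpers emit EIR structures of the form
 * { length, EIR type, UUID list }. Each starts out advertising a complete
 * list (EIR_UUID*_ALL) and downgrades the type to the "some" variant once
 * the remaining buffer cannot hold another UUID. 16-bit UUIDs are
 * recovered from bytes 12-13 of the stored 128-bit value; values below
 * 0x1100 and the PnP Information service class are skipped (the latter is
 * represented by the Device ID EIR structure written in create_eir()).
 */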
438 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
439 {
440 u8 *ptr = data, *uuids_start = NULL;
441 struct bt_uuid *uuid;
442
443 if (len < 4)
444 return ptr;
445
446 list_for_each_entry(uuid, &hdev->uuids, list) {
447 u16 uuid16;
448
449 if (uuid->size != 16)
450 continue;
451
452 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
453 if (uuid16 < 0x1100)
454 continue;
455
456 if (uuid16 == PNP_INFO_SVCLASS_ID)
457 continue;
458
459 if (!uuids_start) {
460 uuids_start = ptr;
461 uuids_start[0] = 1;
462 uuids_start[1] = EIR_UUID16_ALL;
463 ptr += 2;
464 }
465
466 /* Stop if not enough space to put next UUID */
467 if ((ptr - data) + sizeof(u16) > len) {
468 uuids_start[1] = EIR_UUID16_SOME;
469 break;
470 }
471
472 *ptr++ = (uuid16 & 0x00ff);
473 *ptr++ = (uuid16 & 0xff00) >> 8;
474 uuids_start[0] += sizeof(uuid16);
475 }
476
477 return ptr;
478 }
479
480 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
481 {
482 u8 *ptr = data, *uuids_start = NULL;
483 struct bt_uuid *uuid;
484
485 if (len < 6)
486 return ptr;
487
488 list_for_each_entry(uuid, &hdev->uuids, list) {
489 if (uuid->size != 32)
490 continue;
491
492 if (!uuids_start) {
493 uuids_start = ptr;
494 uuids_start[0] = 1;
495 uuids_start[1] = EIR_UUID32_ALL;
496 ptr += 2;
497 }
498
499 /* Stop if not enough space to put next UUID */
500 if ((ptr - data) + sizeof(u32) > len) {
501 uuids_start[1] = EIR_UUID32_SOME;
502 break;
503 }
504
505 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
506 ptr += sizeof(u32);
507 uuids_start[0] += sizeof(u32);
508 }
509
510 return ptr;
511 }
512
513 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
514 {
515 u8 *ptr = data, *uuids_start = NULL;
516 struct bt_uuid *uuid;
517
518 if (len < 18)
519 return ptr;
520
521 list_for_each_entry(uuid, &hdev->uuids, list) {
522 if (uuid->size != 128)
523 continue;
524
525 if (!uuids_start) {
526 uuids_start = ptr;
527 uuids_start[0] = 1;
528 uuids_start[1] = EIR_UUID128_ALL;
529 ptr += 2;
530 }
531
532 /* Stop if not enough space to put next UUID */
533 if ((ptr - data) + 16 > len) {
534 uuids_start[1] = EIR_UUID128_SOME;
535 break;
536 }
537
538 memcpy(ptr, uuid->uuid, 16);
539 ptr += 16;
540 uuids_start[0] += 16;
541 }
542
543 return ptr;
544 }
545
546 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
547 {
548 struct pending_cmd *cmd;
549
550 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
551 if (cmd->opcode == opcode)
552 return cmd;
553 }
554
555 return NULL;
556 }
557
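/* LE scan response data currently carries only the local name, shortened
 * (EIR_NAME_SHORT) when it does not fit in HCI_MAX_AD_LENGTH.
 * update_scan_rsp_data() below compares against the cached copy and only
 * issues the HCI command when the data actually changed.
 */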
558 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
559 {
560 u8 ad_len = 0;
561 size_t name_len;
562
563 name_len = strlen(hdev->dev_name);
564 if (name_len > 0) {
565 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
566
567 if (name_len > max_len) {
568 name_len = max_len;
569 ptr[1] = EIR_NAME_SHORT;
570 } else
571 ptr[1] = EIR_NAME_COMPLETE;
572
573 ptr[0] = name_len + 1;
574
575 memcpy(ptr + 2, hdev->dev_name, name_len);
576
577 ad_len += (name_len + 2);
578 ptr += (name_len + 2);
579 }
580
581 return ad_len;
582 }
583
584 static void update_scan_rsp_data(struct hci_request *req)
585 {
586 struct hci_dev *hdev = req->hdev;
587 struct hci_cp_le_set_scan_rsp_data cp;
588 u8 len;
589
590 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
591 return;
592
593 memset(&cp, 0, sizeof(cp));
594
595 len = create_scan_rsp_data(hdev, cp.data);
596
597 if (hdev->scan_rsp_data_len == len &&
598 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
599 return;
600
601 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
602 hdev->scan_rsp_data_len = len;
603
604 cp.length = len;
605
606 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
607 }
608
609 static u8 get_adv_discov_flags(struct hci_dev *hdev)
610 {
611 struct pending_cmd *cmd;
612
613 /* If there's a pending mgmt command the flags will not yet have
614 * their final values, so check for this first.
615 */
616 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
617 if (cmd) {
618 struct mgmt_mode *cp = cmd->param;
619 if (cp->val == 0x01)
620 return LE_AD_GENERAL;
621 else if (cp->val == 0x02)
622 return LE_AD_LIMITED;
623 } else {
624 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
625 return LE_AD_LIMITED;
626 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
627 return LE_AD_GENERAL;
628 }
629
630 return 0;
631 }
632
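/* Advertising data is rebuilt from scratch on every update: a Flags
 * structure carrying the discoverable mode (general/limited) plus either
 * the BR/EDR-capable bits or LE_AD_NO_BREDR, followed by the TX power
 * level when the controller reported a valid value.
 */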
633 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
634 {
635 u8 ad_len = 0, flags = 0;
636
637 flags |= get_adv_discov_flags(hdev);
638
639 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
640 if (lmp_le_br_capable(hdev))
641 flags |= LE_AD_SIM_LE_BREDR_CTRL;
642 if (lmp_host_le_br_capable(hdev))
643 flags |= LE_AD_SIM_LE_BREDR_HOST;
644 } else {
645 flags |= LE_AD_NO_BREDR;
646 }
647
648 if (flags) {
649 BT_DBG("adv flags 0x%02x", flags);
650
651 ptr[0] = 2;
652 ptr[1] = EIR_FLAGS;
653 ptr[2] = flags;
654
655 ad_len += 3;
656 ptr += 3;
657 }
658
659 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
660 ptr[0] = 2;
661 ptr[1] = EIR_TX_POWER;
662 ptr[2] = (u8) hdev->adv_tx_power;
663
664 ad_len += 3;
665 ptr += 3;
666 }
667
668 return ad_len;
669 }
670
671 static void update_adv_data(struct hci_request *req)
672 {
673 struct hci_dev *hdev = req->hdev;
674 struct hci_cp_le_set_adv_data cp;
675 u8 len;
676
677 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
678 return;
679
680 memset(&cp, 0, sizeof(cp));
681
682 len = create_adv_data(hdev, cp.data);
683
684 if (hdev->adv_data_len == len &&
685 memcmp(cp.data, hdev->adv_data, len) == 0)
686 return;
687
688 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
689 hdev->adv_data_len = len;
690
691 cp.length = len;
692
693 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
694 }
695
696 static void create_eir(struct hci_dev *hdev, u8 *data)
697 {
698 u8 *ptr = data;
699 size_t name_len;
700
701 name_len = strlen(hdev->dev_name);
702
703 if (name_len > 0) {
704 /* EIR Data type */
705 if (name_len > 48) {
706 name_len = 48;
707 ptr[1] = EIR_NAME_SHORT;
708 } else
709 ptr[1] = EIR_NAME_COMPLETE;
710
711 /* EIR Data length */
712 ptr[0] = name_len + 1;
713
714 memcpy(ptr + 2, hdev->dev_name, name_len);
715
716 ptr += (name_len + 2);
717 }
718
719 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
720 ptr[0] = 2;
721 ptr[1] = EIR_TX_POWER;
722 ptr[2] = (u8) hdev->inq_tx_power;
723
724 ptr += 3;
725 }
726
727 if (hdev->devid_source > 0) {
728 ptr[0] = 9;
729 ptr[1] = EIR_DEVICE_ID;
730
731 put_unaligned_le16(hdev->devid_source, ptr + 2);
732 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
733 put_unaligned_le16(hdev->devid_product, ptr + 6);
734 put_unaligned_le16(hdev->devid_version, ptr + 8);
735
736 ptr += 10;
737 }
738
739 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
740 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
741 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
742 }
743
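/* The extended inquiry response is only rewritten when it can take
 * effect: the controller must be powered, support EIR and have SSP
 * enabled, and the service cache must not be in its settling period.
 * A comparison against hdev->eir avoids redundant HCI_OP_WRITE_EIR
 * commands.
 */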
744 static void update_eir(struct hci_request *req)
745 {
746 struct hci_dev *hdev = req->hdev;
747 struct hci_cp_write_eir cp;
748
749 if (!hdev_is_powered(hdev))
750 return;
751
752 if (!lmp_ext_inq_capable(hdev))
753 return;
754
755 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
756 return;
757
758 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
759 return;
760
761 memset(&cp, 0, sizeof(cp));
762
763 create_eir(hdev, cp.data);
764
765 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
766 return;
767
768 memcpy(hdev->eir, cp.data, sizeof(cp.data));
769
770 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
771 }
772
773 static u8 get_service_classes(struct hci_dev *hdev)
774 {
775 struct bt_uuid *uuid;
776 u8 val = 0;
777
778 list_for_each_entry(uuid, &hdev->uuids, list)
779 val |= uuid->svc_hint;
780
781 return val;
782 }
783
784 static void update_class(struct hci_request *req)
785 {
786 struct hci_dev *hdev = req->hdev;
787 u8 cod[3];
788
789 BT_DBG("%s", hdev->name);
790
791 if (!hdev_is_powered(hdev))
792 return;
793
794 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
795 return;
796
797 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
798 return;
799
800 cod[0] = hdev->minor_class;
801 cod[1] = hdev->major_class;
802 cod[2] = get_service_classes(hdev);
803
804 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
805 cod[1] |= 0x20;
806
807 if (memcmp(cod, hdev->dev_class, 3) == 0)
808 return;
809
810 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
811 }
812
813 static void service_cache_off(struct work_struct *work)
814 {
815 struct hci_dev *hdev = container_of(work, struct hci_dev,
816 service_cache.work);
817 struct hci_request req;
818
819 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
820 return;
821
822 hci_req_init(&req, hdev);
823
824 hci_dev_lock(hdev);
825
826 update_eir(&req);
827 update_class(&req);
828
829 hci_dev_unlock(hdev);
830
831 hci_req_run(&req, NULL);
832 }
833
834 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
835 {
836 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
837 return;
838
839 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
840
841 /* Non-mgmt controlled devices get this bit set
842 * implicitly so that pairing works for them; however,
843 * for mgmt we require user-space to explicitly enable
844 * it.
845 */
846 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
847 }
848
849 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
850 void *data, u16 data_len)
851 {
852 struct mgmt_rp_read_info rp;
853
854 BT_DBG("sock %p %s", sk, hdev->name);
855
856 hci_dev_lock(hdev);
857
858 memset(&rp, 0, sizeof(rp));
859
860 bacpy(&rp.bdaddr, &hdev->bdaddr);
861
862 rp.version = hdev->hci_ver;
863 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
864
865 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
866 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
867
868 memcpy(rp.dev_class, hdev->dev_class, 3);
869
870 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
871 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
872
873 hci_dev_unlock(hdev);
874
875 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
876 sizeof(rp));
877 }
878
879 static void mgmt_pending_free(struct pending_cmd *cmd)
880 {
881 sock_put(cmd->sk);
882 kfree(cmd->param);
883 kfree(cmd);
884 }
885
886 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
887 struct hci_dev *hdev, void *data,
888 u16 len)
889 {
890 struct pending_cmd *cmd;
891
892 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
893 if (!cmd)
894 return NULL;
895
896 cmd->opcode = opcode;
897 cmd->index = hdev->id;
898
899 cmd->param = kmalloc(len, GFP_KERNEL);
900 if (!cmd->param) {
901 kfree(cmd);
902 return NULL;
903 }
904
905 if (data)
906 memcpy(cmd->param, data, len);
907
908 cmd->sk = sk;
909 sock_hold(sk);
910
911 list_add(&cmd->list, &hdev->mgmt_pending);
912
913 return cmd;
914 }
915
916 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
917 void (*cb)(struct pending_cmd *cmd,
918 void *data),
919 void *data)
920 {
921 struct pending_cmd *cmd, *tmp;
922
923 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
924 if (opcode > 0 && cmd->opcode != opcode)
925 continue;
926
927 cb(cmd, data);
928 }
929 }
930
931 static void mgmt_pending_remove(struct pending_cmd *cmd)
932 {
933 list_del(&cmd->list);
934 mgmt_pending_free(cmd);
935 }
936
937 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
938 {
939 __le32 settings = cpu_to_le32(get_current_settings(hdev));
940
941 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
942 sizeof(settings));
943 }
944
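/* Set Powered illustrates the pattern used by most setters in this file:
 * validate the parameter, reject the request if one is already pending,
 * answer immediately when the requested state equals the current one, and
 * otherwise record a pending_cmd and defer the real work (here the
 * power_on/power_off work items) until the transition completes.
 */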
945 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
946 u16 len)
947 {
948 struct mgmt_mode *cp = data;
949 struct pending_cmd *cmd;
950 int err;
951
952 BT_DBG("request for %s", hdev->name);
953
954 if (cp->val != 0x00 && cp->val != 0x01)
955 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
956 MGMT_STATUS_INVALID_PARAMS);
957
958 hci_dev_lock(hdev);
959
960 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
962 MGMT_STATUS_BUSY);
963 goto failed;
964 }
965
966 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
967 cancel_delayed_work(&hdev->power_off);
968
969 if (cp->val) {
970 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
971 data, len);
972 err = mgmt_powered(hdev, 1);
973 goto failed;
974 }
975 }
976
977 if (!!cp->val == hdev_is_powered(hdev)) {
978 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
979 goto failed;
980 }
981
982 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
983 if (!cmd) {
984 err = -ENOMEM;
985 goto failed;
986 }
987
988 if (cp->val)
989 queue_work(hdev->req_workqueue, &hdev->power_on);
990 else
991 queue_work(hdev->req_workqueue, &hdev->power_off.work);
992
993 err = 0;
994
995 failed:
996 hci_dev_unlock(hdev);
997 return err;
998 }
999
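/* Unsolicited management events use the same mgmt_hdr framing as command
 * replies but are broadcast to every open control socket via
 * hci_send_to_control(), optionally skipping the socket that triggered
 * the change so it only sees its own command response.
 */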
1000 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1001 struct sock *skip_sk)
1002 {
1003 struct sk_buff *skb;
1004 struct mgmt_hdr *hdr;
1005
1006 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1007 if (!skb)
1008 return -ENOMEM;
1009
1010 hdr = (void *) skb_put(skb, sizeof(*hdr));
1011 hdr->opcode = cpu_to_le16(event);
1012 if (hdev)
1013 hdr->index = cpu_to_le16(hdev->id);
1014 else
1015 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1016 hdr->len = cpu_to_le16(data_len);
1017
1018 if (data)
1019 memcpy(skb_put(skb, data_len), data, data_len);
1020
1021 /* Time stamp */
1022 __net_timestamp(skb);
1023
1024 hci_send_to_control(skb, skip_sk);
1025 kfree_skb(skb);
1026
1027 return 0;
1028 }
1029
1030 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1031 {
1032 __le32 ev;
1033
1034 ev = cpu_to_le32(get_current_settings(hdev));
1035
1036 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1037 }
1038
1039 struct cmd_lookup {
1040 struct sock *sk;
1041 struct hci_dev *hdev;
1042 u8 mgmt_status;
1043 };
1044
1045 static void settings_rsp(struct pending_cmd *cmd, void *data)
1046 {
1047 struct cmd_lookup *match = data;
1048
1049 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1050
1051 list_del(&cmd->list);
1052
1053 if (match->sk == NULL) {
1054 match->sk = cmd->sk;
1055 sock_hold(match->sk);
1056 }
1057
1058 mgmt_pending_free(cmd);
1059 }
1060
1061 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1062 {
1063 u8 *status = data;
1064
1065 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1066 mgmt_pending_remove(cmd);
1067 }
1068
1069 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1070 {
1071 if (!lmp_bredr_capable(hdev))
1072 return MGMT_STATUS_NOT_SUPPORTED;
1073 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1074 return MGMT_STATUS_REJECTED;
1075 else
1076 return MGMT_STATUS_SUCCESS;
1077 }
1078
1079 static u8 mgmt_le_support(struct hci_dev *hdev)
1080 {
1081 if (!lmp_le_capable(hdev))
1082 return MGMT_STATUS_NOT_SUPPORTED;
1083 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1084 return MGMT_STATUS_REJECTED;
1085 else
1086 return MGMT_STATUS_SUCCESS;
1087 }
1088
1089 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1090 {
1091 struct pending_cmd *cmd;
1092 struct mgmt_mode *cp;
1093 struct hci_request req;
1094 bool changed;
1095
1096 BT_DBG("status 0x%02x", status);
1097
1098 hci_dev_lock(hdev);
1099
1100 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1101 if (!cmd)
1102 goto unlock;
1103
1104 if (status) {
1105 u8 mgmt_err = mgmt_status(status);
1106 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1107 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1108 goto remove_cmd;
1109 }
1110
1111 cp = cmd->param;
1112 if (cp->val) {
1113 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1114 &hdev->dev_flags);
1115
1116 if (hdev->discov_timeout > 0) {
1117 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1118 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1119 to);
1120 }
1121 } else {
1122 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1123 &hdev->dev_flags);
1124 }
1125
1126 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1127
1128 if (changed)
1129 new_settings(hdev, cmd->sk);
1130
1131 /* When the discoverable mode gets changed, make sure
1132 * that the class of device has the limited discoverable
1133 * bit correctly set.
1134 */
1135 hci_req_init(&req, hdev);
1136 update_class(&req);
1137 hci_req_run(&req, NULL);
1138
1139 remove_cmd:
1140 mgmt_pending_remove(cmd);
1141
1142 unlock:
1143 hci_dev_unlock(hdev);
1144 }
1145
1146 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1147 u16 len)
1148 {
1149 struct mgmt_cp_set_discoverable *cp = data;
1150 struct pending_cmd *cmd;
1151 struct hci_request req;
1152 u16 timeout;
1153 u8 scan;
1154 int err;
1155
1156 BT_DBG("request for %s", hdev->name);
1157
1158 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1159 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1160 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1161 MGMT_STATUS_REJECTED);
1162
1163 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1164 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1165 MGMT_STATUS_INVALID_PARAMS);
1166
1167 timeout = __le16_to_cpu(cp->timeout);
1168
1169 /* Disabling discoverable requires that no timeout is set,
1170 * and enabling limited discoverable requires a timeout.
1171 */
1172 if ((cp->val == 0x00 && timeout > 0) ||
1173 (cp->val == 0x02 && timeout == 0))
1174 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1175 MGMT_STATUS_INVALID_PARAMS);
1176
1177 hci_dev_lock(hdev);
1178
1179 if (!hdev_is_powered(hdev) && timeout > 0) {
1180 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1181 MGMT_STATUS_NOT_POWERED);
1182 goto failed;
1183 }
1184
1185 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1186 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1187 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1188 MGMT_STATUS_BUSY);
1189 goto failed;
1190 }
1191
1192 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1193 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1194 MGMT_STATUS_REJECTED);
1195 goto failed;
1196 }
1197
1198 if (!hdev_is_powered(hdev)) {
1199 bool changed = false;
1200
1201 /* Setting limited discoverable when powered off is
1202 * not a valid operation since it requires a timeout
1203 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1204 */
1205 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1206 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1207 changed = true;
1208 }
1209
1210 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1211 if (err < 0)
1212 goto failed;
1213
1214 if (changed)
1215 err = new_settings(hdev, sk);
1216
1217 goto failed;
1218 }
1219
1220 /* If the current mode is the same, then just update the timeout
1221 * value with the new value. And if only the timeout gets updated,
1222 * then no need for any HCI transactions.
1223 */
1224 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1225 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1226 &hdev->dev_flags)) {
1227 cancel_delayed_work(&hdev->discov_off);
1228 hdev->discov_timeout = timeout;
1229
1230 if (cp->val && hdev->discov_timeout > 0) {
1231 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1232 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1233 to);
1234 }
1235
1236 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1237 goto failed;
1238 }
1239
1240 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1241 if (!cmd) {
1242 err = -ENOMEM;
1243 goto failed;
1244 }
1245
1246 /* Cancel any potential discoverable timeout that might be
1247 * still active and store new timeout value. The arming of
1248 * the timeout happens in the complete handler.
1249 */
1250 cancel_delayed_work(&hdev->discov_off);
1251 hdev->discov_timeout = timeout;
1252
1253 /* Limited discoverable mode */
1254 if (cp->val == 0x02)
1255 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1256 else
1257 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1258
1259 hci_req_init(&req, hdev);
1260
1261 /* The procedure for LE-only controllers is much simpler - just
1262 * update the advertising data.
1263 */
1264 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1265 goto update_ad;
1266
1267 scan = SCAN_PAGE;
1268
1269 if (cp->val) {
1270 struct hci_cp_write_current_iac_lap hci_cp;
1271
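/* The inquiry access codes below are the LIAC (0x9e8b00) and GIAC
 * (0x9e8b33) LAPs written in little-endian byte order, as required
 * by Write Current IAC LAP.
 */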
1272 if (cp->val == 0x02) {
1273 /* Limited discoverable mode */
1274 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1275 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1276 hci_cp.iac_lap[1] = 0x8b;
1277 hci_cp.iac_lap[2] = 0x9e;
1278 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1279 hci_cp.iac_lap[4] = 0x8b;
1280 hci_cp.iac_lap[5] = 0x9e;
1281 } else {
1282 /* General discoverable mode */
1283 hci_cp.num_iac = 1;
1284 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1285 hci_cp.iac_lap[1] = 0x8b;
1286 hci_cp.iac_lap[2] = 0x9e;
1287 }
1288
1289 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1290 (hci_cp.num_iac * 3) + 1, &hci_cp);
1291
1292 scan |= SCAN_INQUIRY;
1293 } else {
1294 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1295 }
1296
1297 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1298
1299 update_ad:
1300 update_adv_data(&req);
1301
1302 err = hci_req_run(&req, set_discoverable_complete);
1303 if (err < 0)
1304 mgmt_pending_remove(cmd);
1305
1306 failed:
1307 hci_dev_unlock(hdev);
1308 return err;
1309 }
1310
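/* Fast connectable trades power for latency by switching to interlaced
 * page scanning with a 160 ms interval (0x0100 slots of 0.625 ms) instead
 * of the default 1.28 s (0x0800 slots); the 0x0012 window corresponds to
 * 11.25 ms in both cases. Commands are only added when the parameters
 * actually differ from the controller's current values.
 */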
1311 static void write_fast_connectable(struct hci_request *req, bool enable)
1312 {
1313 struct hci_dev *hdev = req->hdev;
1314 struct hci_cp_write_page_scan_activity acp;
1315 u8 type;
1316
1317 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1318 return;
1319
1320 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1321 return;
1322
1323 if (enable) {
1324 type = PAGE_SCAN_TYPE_INTERLACED;
1325
1326 /* 160 msec page scan interval */
1327 acp.interval = __constant_cpu_to_le16(0x0100);
1328 } else {
1329 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1330
1331 /* default 1.28 sec page scan */
1332 acp.interval = __constant_cpu_to_le16(0x0800);
1333 }
1334
1335 acp.window = __constant_cpu_to_le16(0x0012);
1336
1337 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1338 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1339 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1340 sizeof(acp), &acp);
1341
1342 if (hdev->page_scan_type != type)
1343 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1344 }
1345
1346 static u8 get_adv_type(struct hci_dev *hdev)
1347 {
1348 struct pending_cmd *cmd;
1349 bool connectable;
1350
1351 /* If there's a pending mgmt command the flag will not yet have
1352 * its final value, so check for this first.
1353 */
1354 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1355 if (cmd) {
1356 struct mgmt_mode *cp = cmd->param;
1357 connectable = !!cp->val;
1358 } else {
1359 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1360 }
1361
1362 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1363 }
1364
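/* Advertising is (re)enabled with a fixed 1.28 s interval (0x0800 slots
 * of 0.625 ms) on all three advertising channels (channel_map 0x07).
 * Whether LE_ADV_IND or LE_ADV_NONCONN_IND is used follows the
 * connectable setting resolved by get_adv_type() above.
 */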
1365 static void enable_advertising(struct hci_request *req)
1366 {
1367 struct hci_dev *hdev = req->hdev;
1368 struct hci_cp_le_set_adv_param cp;
1369 u8 enable = 0x01;
1370
1371 memset(&cp, 0, sizeof(cp));
1372 cp.min_interval = __constant_cpu_to_le16(0x0800);
1373 cp.max_interval = __constant_cpu_to_le16(0x0800);
1374 cp.type = get_adv_type(hdev);
1375 cp.own_address_type = hdev->own_addr_type;
1376 cp.channel_map = 0x07;
1377
1378 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1379
1380 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1381 }
1382
1383 static void disable_advertising(struct hci_request *req)
1384 {
1385 u8 enable = 0x00;
1386
1387 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1388 }
1389
1390 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1391 {
1392 struct pending_cmd *cmd;
1393 struct mgmt_mode *cp;
1394 bool changed;
1395
1396 BT_DBG("status 0x%02x", status);
1397
1398 hci_dev_lock(hdev);
1399
1400 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1401 if (!cmd)
1402 goto unlock;
1403
1404 if (status) {
1405 u8 mgmt_err = mgmt_status(status);
1406 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1407 goto remove_cmd;
1408 }
1409
1410 cp = cmd->param;
1411 if (cp->val)
1412 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1413 else
1414 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1415
1416 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1417
1418 if (changed)
1419 new_settings(hdev, cmd->sk);
1420
1421 remove_cmd:
1422 mgmt_pending_remove(cmd);
1423
1424 unlock:
1425 hci_dev_unlock(hdev);
1426 }
1427
1428 static int set_connectable_update_settings(struct hci_dev *hdev,
1429 struct sock *sk, u8 val)
1430 {
1431 bool changed = false;
1432 int err;
1433
1434 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1435 changed = true;
1436
1437 if (val) {
1438 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1439 } else {
1440 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1441 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1442 }
1443
1444 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1445 if (err < 0)
1446 return err;
1447
1448 if (changed)
1449 return new_settings(hdev, sk);
1450
1451 return 0;
1452 }
1453
1454 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1455 u16 len)
1456 {
1457 struct mgmt_mode *cp = data;
1458 struct pending_cmd *cmd;
1459 struct hci_request req;
1460 u8 scan;
1461 int err;
1462
1463 BT_DBG("request for %s", hdev->name);
1464
1465 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1466 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1467 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1468 MGMT_STATUS_REJECTED);
1469
1470 if (cp->val != 0x00 && cp->val != 0x01)
1471 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1472 MGMT_STATUS_INVALID_PARAMS);
1473
1474 hci_dev_lock(hdev);
1475
1476 if (!hdev_is_powered(hdev)) {
1477 err = set_connectable_update_settings(hdev, sk, cp->val);
1478 goto failed;
1479 }
1480
1481 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1482 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1483 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1484 MGMT_STATUS_BUSY);
1485 goto failed;
1486 }
1487
1488 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1489 if (!cmd) {
1490 err = -ENOMEM;
1491 goto failed;
1492 }
1493
1494 hci_req_init(&req, hdev);
1495
1496 /* If BR/EDR is not enabled and we disable advertising as a
1497 * by-product of disabling connectable, we need to update the
1498 * advertising flags.
1499 */
1500 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1501 if (!cp->val) {
1502 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1503 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1504 }
1505 update_adv_data(&req);
1506 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1507 if (cp->val) {
1508 scan = SCAN_PAGE;
1509 } else {
1510 scan = 0;
1511
1512 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1513 hdev->discov_timeout > 0)
1514 cancel_delayed_work(&hdev->discov_off);
1515 }
1516
1517 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1518 }
1519
1520 /* If we're going from non-connectable to connectable or
1521 * vice-versa when fast connectable is enabled, ensure that fast
1522 * connectable gets disabled. write_fast_connectable won't do
1523 * anything if the page scan parameters are already what they
1524 * should be.
1525 */
1526 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1527 write_fast_connectable(&req, false);
1528
1529 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1530 hci_conn_num(hdev, LE_LINK) == 0) {
1531 disable_advertising(&req);
1532 enable_advertising(&req);
1533 }
1534
1535 err = hci_req_run(&req, set_connectable_complete);
1536 if (err < 0) {
1537 mgmt_pending_remove(cmd);
1538 if (err == -ENODATA)
1539 err = set_connectable_update_settings(hdev, sk,
1540 cp->val);
1541 goto failed;
1542 }
1543
1544 failed:
1545 hci_dev_unlock(hdev);
1546 return err;
1547 }
1548
1549 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1550 u16 len)
1551 {
1552 struct mgmt_mode *cp = data;
1553 bool changed;
1554 int err;
1555
1556 BT_DBG("request for %s", hdev->name);
1557
1558 if (cp->val != 0x00 && cp->val != 0x01)
1559 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1560 MGMT_STATUS_INVALID_PARAMS);
1561
1562 hci_dev_lock(hdev);
1563
1564 if (cp->val)
1565 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1566 else
1567 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1568
1569 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1570 if (err < 0)
1571 goto unlock;
1572
1573 if (changed)
1574 err = new_settings(hdev, sk);
1575
1576 unlock:
1577 hci_dev_unlock(hdev);
1578 return err;
1579 }
1580
1581 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1582 u16 len)
1583 {
1584 struct mgmt_mode *cp = data;
1585 struct pending_cmd *cmd;
1586 u8 val, status;
1587 int err;
1588
1589 BT_DBG("request for %s", hdev->name);
1590
1591 status = mgmt_bredr_support(hdev);
1592 if (status)
1593 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1594 status);
1595
1596 if (cp->val != 0x00 && cp->val != 0x01)
1597 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1598 MGMT_STATUS_INVALID_PARAMS);
1599
1600 hci_dev_lock(hdev);
1601
1602 if (!hdev_is_powered(hdev)) {
1603 bool changed = false;
1604
1605 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1606 &hdev->dev_flags)) {
1607 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1608 changed = true;
1609 }
1610
1611 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1612 if (err < 0)
1613 goto failed;
1614
1615 if (changed)
1616 err = new_settings(hdev, sk);
1617
1618 goto failed;
1619 }
1620
1621 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1622 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1623 MGMT_STATUS_BUSY);
1624 goto failed;
1625 }
1626
1627 val = !!cp->val;
1628
1629 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1630 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1631 goto failed;
1632 }
1633
1634 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1635 if (!cmd) {
1636 err = -ENOMEM;
1637 goto failed;
1638 }
1639
1640 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1641 if (err < 0) {
1642 mgmt_pending_remove(cmd);
1643 goto failed;
1644 }
1645
1646 failed:
1647 hci_dev_unlock(hdev);
1648 return err;
1649 }
1650
1651 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1652 {
1653 struct mgmt_mode *cp = data;
1654 struct pending_cmd *cmd;
1655 u8 status;
1656 int err;
1657
1658 BT_DBG("request for %s", hdev->name);
1659
1660 status = mgmt_bredr_support(hdev);
1661 if (status)
1662 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1663
1664 if (!lmp_ssp_capable(hdev))
1665 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1666 MGMT_STATUS_NOT_SUPPORTED);
1667
1668 if (cp->val != 0x00 && cp->val != 0x01)
1669 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1670 MGMT_STATUS_INVALID_PARAMS);
1671
1672 hci_dev_lock(hdev);
1673
1674 if (!hdev_is_powered(hdev)) {
1675 bool changed;
1676
1677 if (cp->val) {
1678 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1679 &hdev->dev_flags);
1680 } else {
1681 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1682 &hdev->dev_flags);
1683 if (!changed)
1684 changed = test_and_clear_bit(HCI_HS_ENABLED,
1685 &hdev->dev_flags);
1686 else
1687 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1688 }
1689
1690 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1691 if (err < 0)
1692 goto failed;
1693
1694 if (changed)
1695 err = new_settings(hdev, sk);
1696
1697 goto failed;
1698 }
1699
1700 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1701 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1702 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1703 MGMT_STATUS_BUSY);
1704 goto failed;
1705 }
1706
1707 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1708 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1709 goto failed;
1710 }
1711
1712 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1713 if (!cmd) {
1714 err = -ENOMEM;
1715 goto failed;
1716 }
1717
1718 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1719 if (err < 0) {
1720 mgmt_pending_remove(cmd);
1721 goto failed;
1722 }
1723
1724 failed:
1725 hci_dev_unlock(hdev);
1726 return err;
1727 }
1728
1729 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1730 {
1731 struct mgmt_mode *cp = data;
1732 bool changed;
1733 u8 status;
1734 int err;
1735
1736 BT_DBG("request for %s", hdev->name);
1737
1738 status = mgmt_bredr_support(hdev);
1739 if (status)
1740 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1741
1742 if (!lmp_ssp_capable(hdev))
1743 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1744 MGMT_STATUS_NOT_SUPPORTED);
1745
1746 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1747 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1748 MGMT_STATUS_REJECTED);
1749
1750 if (cp->val != 0x00 && cp->val != 0x01)
1751 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1752 MGMT_STATUS_INVALID_PARAMS);
1753
1754 hci_dev_lock(hdev);
1755
1756 if (cp->val) {
1757 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1758 } else {
1759 if (hdev_is_powered(hdev)) {
1760 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1761 MGMT_STATUS_REJECTED);
1762 goto unlock;
1763 }
1764
1765 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1766 }
1767
1768 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1769 if (err < 0)
1770 goto unlock;
1771
1772 if (changed)
1773 err = new_settings(hdev, sk);
1774
1775 unlock:
1776 hci_dev_unlock(hdev);
1777 return err;
1778 }
1779
1780 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1781 {
1782 struct cmd_lookup match = { NULL, hdev };
1783
1784 if (status) {
1785 u8 mgmt_err = mgmt_status(status);
1786
1787 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1788 &mgmt_err);
1789 return;
1790 }
1791
1792 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1793
1794 new_settings(hdev, match.sk);
1795
1796 if (match.sk)
1797 sock_put(match.sk);
1798
1799 /* Make sure the controller has a good default for
1800 * advertising data. Restrict the update to when LE
1801 * has actually been enabled. During power on, the
1802 * update in powered_update_hci will take care of it.
1803 */
1804 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1805 struct hci_request req;
1806
1807 hci_dev_lock(hdev);
1808
1809 hci_req_init(&req, hdev);
1810 update_adv_data(&req);
1811 update_scan_rsp_data(&req);
1812 hci_req_run(&req, NULL);
1813
1814 hci_dev_unlock(hdev);
1815 }
1816 }
1817
1818 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1819 {
1820 struct mgmt_mode *cp = data;
1821 struct hci_cp_write_le_host_supported hci_cp;
1822 struct pending_cmd *cmd;
1823 struct hci_request req;
1824 int err;
1825 u8 val, enabled;
1826
1827 BT_DBG("request for %s", hdev->name);
1828
1829 if (!lmp_le_capable(hdev))
1830 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1831 MGMT_STATUS_NOT_SUPPORTED);
1832
1833 if (cp->val != 0x00 && cp->val != 0x01)
1834 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1835 MGMT_STATUS_INVALID_PARAMS);
1836
1837 /* LE-only devices do not allow toggling LE on/off */
1838 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1839 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1840 MGMT_STATUS_REJECTED);
1841
1842 hci_dev_lock(hdev);
1843
1844 val = !!cp->val;
1845 enabled = lmp_host_le_capable(hdev);
1846
1847 if (!hdev_is_powered(hdev) || val == enabled) {
1848 bool changed = false;
1849
1850 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1851 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1852 changed = true;
1853 }
1854
1855 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1856 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1857 changed = true;
1858 }
1859
1860 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1861 if (err < 0)
1862 goto unlock;
1863
1864 if (changed)
1865 err = new_settings(hdev, sk);
1866
1867 goto unlock;
1868 }
1869
1870 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1871 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1872 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1873 MGMT_STATUS_BUSY);
1874 goto unlock;
1875 }
1876
1877 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1878 if (!cmd) {
1879 err = -ENOMEM;
1880 goto unlock;
1881 }
1882
1883 hci_req_init(&req, hdev);
1884
1885 memset(&hci_cp, 0, sizeof(hci_cp));
1886
1887 if (val) {
1888 hci_cp.le = val;
1889 hci_cp.simul = lmp_le_br_capable(hdev);
1890 } else {
1891 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1892 disable_advertising(&req);
1893 }
1894
1895 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1896 &hci_cp);
1897
1898 err = hci_req_run(&req, le_enable_complete);
1899 if (err < 0)
1900 mgmt_pending_remove(cmd);
1901
1902 unlock:
1903 hci_dev_unlock(hdev);
1904 return err;
1905 }
1906
1907 /* This is a helper function to test for pending mgmt commands that can
1908 * cause CoD or EIR HCI commands. We can only allow one such pending
1909 * mgmt command at a time, since otherwise we cannot easily track what
1910 * the current values are or will be, and therefore cannot calculate
1911 * whether a new HCI command needs to be sent and, if so, with what value.
1912 */
1913 static bool pending_eir_or_class(struct hci_dev *hdev)
1914 {
1915 struct pending_cmd *cmd;
1916
1917 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1918 switch (cmd->opcode) {
1919 case MGMT_OP_ADD_UUID:
1920 case MGMT_OP_REMOVE_UUID:
1921 case MGMT_OP_SET_DEV_CLASS:
1922 case MGMT_OP_SET_POWERED:
1923 return true;
1924 }
1925 }
1926
1927 return false;
1928 }
1929
1930 static const u8 bluetooth_base_uuid[] = {
1931 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1932 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1933 };
1934
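/* UUIDs are stored least-significant byte first, so bluetooth_base_uuid
 * above is the low 96 bits of the Bluetooth Base UUID in that order. A
 * value matching the base in those bytes can be shortened: bytes 12-15
 * then hold the 16- or 32-bit alias, and anything above 0xffff needs the
 * 32-bit form.
 */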
1935 static u8 get_uuid_size(const u8 *uuid)
1936 {
1937 u32 val;
1938
1939 if (memcmp(uuid, bluetooth_base_uuid, 12))
1940 return 128;
1941
1942 val = get_unaligned_le32(&uuid[12]);
1943 if (val > 0xffff)
1944 return 32;
1945
1946 return 16;
1947 }
1948
1949 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1950 {
1951 struct pending_cmd *cmd;
1952
1953 hci_dev_lock(hdev);
1954
1955 cmd = mgmt_pending_find(mgmt_op, hdev);
1956 if (!cmd)
1957 goto unlock;
1958
1959 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1960 hdev->dev_class, 3);
1961
1962 mgmt_pending_remove(cmd);
1963
1964 unlock:
1965 hci_dev_unlock(hdev);
1966 }
1967
1968 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1969 {
1970 BT_DBG("status 0x%02x", status);
1971
1972 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1973 }
1974
1975 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1976 {
1977 struct mgmt_cp_add_uuid *cp = data;
1978 struct pending_cmd *cmd;
1979 struct hci_request req;
1980 struct bt_uuid *uuid;
1981 int err;
1982
1983 BT_DBG("request for %s", hdev->name);
1984
1985 hci_dev_lock(hdev);
1986
1987 if (pending_eir_or_class(hdev)) {
1988 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1989 MGMT_STATUS_BUSY);
1990 goto failed;
1991 }
1992
1993 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1994 if (!uuid) {
1995 err = -ENOMEM;
1996 goto failed;
1997 }
1998
1999 memcpy(uuid->uuid, cp->uuid, 16);
2000 uuid->svc_hint = cp->svc_hint;
2001 uuid->size = get_uuid_size(cp->uuid);
2002
2003 list_add_tail(&uuid->list, &hdev->uuids);
2004
2005 hci_req_init(&req, hdev);
2006
2007 update_class(&req);
2008 update_eir(&req);
2009
2010 err = hci_req_run(&req, add_uuid_complete);
2011 if (err < 0) {
2012 if (err != -ENODATA)
2013 goto failed;
2014
2015 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2016 hdev->dev_class, 3);
2017 goto failed;
2018 }
2019
2020 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2021 if (!cmd) {
2022 err = -ENOMEM;
2023 goto failed;
2024 }
2025
2026 err = 0;
2027
2028 failed:
2029 hci_dev_unlock(hdev);
2030 return err;
2031 }
2032
2033 static bool enable_service_cache(struct hci_dev *hdev)
2034 {
2035 if (!hdev_is_powered(hdev))
2036 return false;
2037
2038 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2039 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2040 CACHE_TIMEOUT);
2041 return true;
2042 }
2043
2044 return false;
2045 }
2046
2047 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2048 {
2049 BT_DBG("status 0x%02x", status);
2050
2051 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2052 }
2053
2054 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2055 u16 len)
2056 {
2057 struct mgmt_cp_remove_uuid *cp = data;
2058 struct pending_cmd *cmd;
2059 struct bt_uuid *match, *tmp;
2060 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2061 struct hci_request req;
2062 int err, found;
2063
2064 BT_DBG("request for %s", hdev->name);
2065
2066 hci_dev_lock(hdev);
2067
2068 if (pending_eir_or_class(hdev)) {
2069 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2070 MGMT_STATUS_BUSY);
2071 goto unlock;
2072 }
2073
2074 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2075 err = hci_uuids_clear(hdev);
2076
2077 if (enable_service_cache(hdev)) {
2078 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2079 0, hdev->dev_class, 3);
2080 goto unlock;
2081 }
2082
2083 goto update_class;
2084 }
2085
2086 found = 0;
2087
2088 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2089 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2090 continue;
2091
2092 list_del(&match->list);
2093 kfree(match);
2094 found++;
2095 }
2096
2097 if (found == 0) {
2098 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2099 MGMT_STATUS_INVALID_PARAMS);
2100 goto unlock;
2101 }
2102
2103 update_class:
2104 hci_req_init(&req, hdev);
2105
2106 update_class(&req);
2107 update_eir(&req);
2108
2109 err = hci_req_run(&req, remove_uuid_complete);
2110 if (err < 0) {
2111 if (err != -ENODATA)
2112 goto unlock;
2113
2114 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2115 hdev->dev_class, 3);
2116 goto unlock;
2117 }
2118
2119 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2120 if (!cmd) {
2121 err = -ENOMEM;
2122 goto unlock;
2123 }
2124
2125 err = 0;
2126
2127 unlock:
2128 hci_dev_unlock(hdev);
2129 return err;
2130 }
2131
2132 static void set_class_complete(struct hci_dev *hdev, u8 status)
2133 {
2134 BT_DBG("status 0x%02x", status);
2135
2136 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2137 }
2138
2139 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2140 u16 len)
2141 {
2142 struct mgmt_cp_set_dev_class *cp = data;
2143 struct pending_cmd *cmd;
2144 struct hci_request req;
2145 int err;
2146
2147 BT_DBG("request for %s", hdev->name);
2148
2149 if (!lmp_bredr_capable(hdev))
2150 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2151 MGMT_STATUS_NOT_SUPPORTED);
2152
2153 hci_dev_lock(hdev);
2154
2155 if (pending_eir_or_class(hdev)) {
2156 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2157 MGMT_STATUS_BUSY);
2158 goto unlock;
2159 }
2160
2161 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2162 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2163 MGMT_STATUS_INVALID_PARAMS);
2164 goto unlock;
2165 }
2166
2167 hdev->major_class = cp->major;
2168 hdev->minor_class = cp->minor;
2169
2170 if (!hdev_is_powered(hdev)) {
2171 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2172 hdev->dev_class, 3);
2173 goto unlock;
2174 }
2175
2176 hci_req_init(&req, hdev);
2177
2178 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2179 hci_dev_unlock(hdev);
2180 cancel_delayed_work_sync(&hdev->service_cache);
2181 hci_dev_lock(hdev);
2182 update_eir(&req);
2183 }
2184
2185 update_class(&req);
2186
2187 err = hci_req_run(&req, set_class_complete);
2188 if (err < 0) {
2189 if (err != -ENODATA)
2190 goto unlock;
2191
2192 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2193 hdev->dev_class, 3);
2194 goto unlock;
2195 }
2196
2197 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2198 if (!cmd) {
2199 err = -ENOMEM;
2200 goto unlock;
2201 }
2202
2203 err = 0;
2204
2205 unlock:
2206 hci_dev_unlock(hdev);
2207 return err;
2208 }
2209
2210 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2211 u16 len)
2212 {
2213 struct mgmt_cp_load_link_keys *cp = data;
2214 u16 key_count, expected_len;
2215 int i;
2216
2217 BT_DBG("request for %s", hdev->name);
2218
2219 if (!lmp_bredr_capable(hdev))
2220 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2221 MGMT_STATUS_NOT_SUPPORTED);
2222
2223 key_count = __le16_to_cpu(cp->key_count);
2224
2225 expected_len = sizeof(*cp) + key_count *
2226 sizeof(struct mgmt_link_key_info);
2227 if (expected_len != len) {
2228 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2229 expected_len, len);
2230 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2231 MGMT_STATUS_INVALID_PARAMS);
2232 }
2233
2234 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2235 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2236 MGMT_STATUS_INVALID_PARAMS);
2237
2238 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2239 key_count);
2240
2241 for (i = 0; i < key_count; i++) {
2242 struct mgmt_link_key_info *key = &cp->keys[i];
2243
2244 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2245 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2246 MGMT_STATUS_INVALID_PARAMS);
2247 }
2248
2249 hci_dev_lock(hdev);
2250
2251 hci_link_keys_clear(hdev);
2252
2253 if (cp->debug_keys)
2254 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2255 else
2256 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2257
2258 for (i = 0; i < key_count; i++) {
2259 struct mgmt_link_key_info *key = &cp->keys[i];
2260
2261 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2262 key->type, key->pin_len);
2263 }
2264
2265 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2266
2267 hci_dev_unlock(hdev);
2268
2269 return 0;
2270 }
2271
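/* Send a Device Unpaired event to every mgmt socket except skip_sk. */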
2272 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273 u8 addr_type, struct sock *skip_sk)
2274 {
2275 struct mgmt_ev_device_unpaired ev;
2276
2277 bacpy(&ev.addr.bdaddr, bdaddr);
2278 ev.addr.type = addr_type;
2279
2280 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2281 skip_sk);
2282 }
2283
2284 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2285 u16 len)
2286 {
2287 struct mgmt_cp_unpair_device *cp = data;
2288 struct mgmt_rp_unpair_device rp;
2289 struct hci_cp_disconnect dc;
2290 struct pending_cmd *cmd;
2291 struct hci_conn *conn;
2292 int err;
2293
2294 memset(&rp, 0, sizeof(rp));
2295 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2296 rp.addr.type = cp->addr.type;
2297
2298 if (!bdaddr_type_is_valid(cp->addr.type))
2299 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2300 MGMT_STATUS_INVALID_PARAMS,
2301 &rp, sizeof(rp));
2302
2303 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2304 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305 MGMT_STATUS_INVALID_PARAMS,
2306 &rp, sizeof(rp));
2307
2308 hci_dev_lock(hdev);
2309
2310 if (!hdev_is_powered(hdev)) {
2311 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2312 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2313 goto unlock;
2314 }
2315
2316 if (cp->addr.type == BDADDR_BREDR)
2317 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2318 else
2319 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2320
2321 if (err < 0) {
2322 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2323 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2324 goto unlock;
2325 }
2326
2327 if (cp->disconnect) {
2328 if (cp->addr.type == BDADDR_BREDR)
2329 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2330 &cp->addr.bdaddr);
2331 else
2332 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2333 &cp->addr.bdaddr);
2334 } else {
2335 conn = NULL;
2336 }
2337
2338 if (!conn) {
2339 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2340 &rp, sizeof(rp));
2341 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2342 goto unlock;
2343 }
2344
2345 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2346 sizeof(*cp));
2347 if (!cmd) {
2348 err = -ENOMEM;
2349 goto unlock;
2350 }
2351
2352 dc.handle = cpu_to_le16(conn->handle);
2353 dc.reason = HCI_ERROR_REMOTE_USER_TERM; /* Remote User Terminated Connection */
2354 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2355 if (err < 0)
2356 mgmt_pending_remove(cmd);
2357
2358 unlock:
2359 hci_dev_unlock(hdev);
2360 return err;
2361 }
2362
2363 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2364 u16 len)
2365 {
2366 struct mgmt_cp_disconnect *cp = data;
2367 struct mgmt_rp_disconnect rp;
2368 struct hci_cp_disconnect dc;
2369 struct pending_cmd *cmd;
2370 struct hci_conn *conn;
2371 int err;
2372
2373 BT_DBG("");
2374
2375 memset(&rp, 0, sizeof(rp));
2376 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2377 rp.addr.type = cp->addr.type;
2378
2379 if (!bdaddr_type_is_valid(cp->addr.type))
2380 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2381 MGMT_STATUS_INVALID_PARAMS,
2382 &rp, sizeof(rp));
2383
2384 hci_dev_lock(hdev);
2385
2386 if (!test_bit(HCI_UP, &hdev->flags)) {
2387 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2388 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2389 goto failed;
2390 }
2391
2392 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2393 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2394 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2395 goto failed;
2396 }
2397
2398 if (cp->addr.type == BDADDR_BREDR)
2399 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2400 &cp->addr.bdaddr);
2401 else
2402 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2403
2404 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2405 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2406 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2407 goto failed;
2408 }
2409
2410 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2411 if (!cmd) {
2412 err = -ENOMEM;
2413 goto failed;
2414 }
2415
2416 dc.handle = cpu_to_le16(conn->handle);
2417 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2418
2419 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2420 if (err < 0)
2421 mgmt_pending_remove(cmd);
2422
2423 failed:
2424 hci_dev_unlock(hdev);
2425 return err;
2426 }
2427
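/* Translate an HCI link type plus LE address type into the corresponding
 * mgmt BDADDR_* address type.
 */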
2428 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2429 {
2430 switch (link_type) {
2431 case LE_LINK:
2432 switch (addr_type) {
2433 case ADDR_LE_DEV_PUBLIC:
2434 return BDADDR_LE_PUBLIC;
2435
2436 default:
2437 /* Fall back to LE Random address type */
2438 return BDADDR_LE_RANDOM;
2439 }
2440
2441 default:
2442 /* Fall back to BR/EDR type */
2443 return BDADDR_BREDR;
2444 }
2445 }
2446
2447 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2448 u16 data_len)
2449 {
2450 struct mgmt_rp_get_connections *rp;
2451 struct hci_conn *c;
2452 size_t rp_len;
2453 int err;
2454 u16 i;
2455
2456 BT_DBG("");
2457
2458 hci_dev_lock(hdev);
2459
2460 if (!hdev_is_powered(hdev)) {
2461 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2462 MGMT_STATUS_NOT_POWERED);
2463 goto unlock;
2464 }
2465
2466 i = 0;
2467 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2468 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2469 i++;
2470 }
2471
2472 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2473 rp = kmalloc(rp_len, GFP_KERNEL);
2474 if (!rp) {
2475 err = -ENOMEM;
2476 goto unlock;
2477 }
2478
2479 i = 0;
2480 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2481 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2482 continue;
2483 bacpy(&rp->addr[i].bdaddr, &c->dst);
2484 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2485 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2486 continue;
2487 i++;
2488 }
2489
2490 rp->conn_count = cpu_to_le16(i);
2491
2492 /* Recalculate length in case of filtered SCO connections, etc */
2493 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2494
2495 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2496 rp_len);
2497
2498 kfree(rp);
2499
2500 unlock:
2501 hci_dev_unlock(hdev);
2502 return err;
2503 }
2504
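/* Track a PIN Code Negative Reply as a pending command and forward it to
 * the controller.
 */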
2505 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2506 struct mgmt_cp_pin_code_neg_reply *cp)
2507 {
2508 struct pending_cmd *cmd;
2509 int err;
2510
2511 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2512 sizeof(*cp));
2513 if (!cmd)
2514 return -ENOMEM;
2515
2516 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2517 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2518 if (err < 0)
2519 mgmt_pending_remove(cmd);
2520
2521 return err;
2522 }
2523
2524 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2525 u16 len)
2526 {
2527 struct hci_conn *conn;
2528 struct mgmt_cp_pin_code_reply *cp = data;
2529 struct hci_cp_pin_code_reply reply;
2530 struct pending_cmd *cmd;
2531 int err;
2532
2533 BT_DBG("");
2534
2535 hci_dev_lock(hdev);
2536
2537 if (!hdev_is_powered(hdev)) {
2538 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2539 MGMT_STATUS_NOT_POWERED);
2540 goto failed;
2541 }
2542
2543 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2544 if (!conn) {
2545 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2546 MGMT_STATUS_NOT_CONNECTED);
2547 goto failed;
2548 }
2549
2550 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2551 struct mgmt_cp_pin_code_neg_reply ncp;
2552
2553 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2554
2555 BT_ERR("PIN code is not 16 bytes long");
2556
2557 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2558 if (err >= 0)
2559 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2560 MGMT_STATUS_INVALID_PARAMS);
2561
2562 goto failed;
2563 }
2564
2565 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2566 if (!cmd) {
2567 err = -ENOMEM;
2568 goto failed;
2569 }
2570
2571 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2572 reply.pin_len = cp->pin_len;
2573 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2574
2575 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2576 if (err < 0)
2577 mgmt_pending_remove(cmd);
2578
2579 failed:
2580 hci_dev_unlock(hdev);
2581 return err;
2582 }
2583
2584 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2585 u16 len)
2586 {
2587 struct mgmt_cp_set_io_capability *cp = data;
2588
2589 BT_DBG("");
2590
2591 hci_dev_lock(hdev);
2592
2593 hdev->io_capability = cp->io_capability;
2594
2595 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2596 hdev->io_capability);
2597
2598 hci_dev_unlock(hdev);
2599
2600 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2601 0);
2602 }
2603
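/* Find the pending Pair Device command that owns this connection, if any. */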
2604 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2605 {
2606 struct hci_dev *hdev = conn->hdev;
2607 struct pending_cmd *cmd;
2608
2609 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2610 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2611 continue;
2612
2613 if (cmd->user_data != conn)
2614 continue;
2615
2616 return cmd;
2617 }
2618
2619 return NULL;
2620 }
2621
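/* Finish a Pair Device command: send the response, detach the connection
 * callbacks, drop the connection reference and remove the pending command.
 */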
2622 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2623 {
2624 struct mgmt_rp_pair_device rp;
2625 struct hci_conn *conn = cmd->user_data;
2626
2627 bacpy(&rp.addr.bdaddr, &conn->dst);
2628 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2629
2630 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2631 &rp, sizeof(rp));
2632
2633 /* So we don't get further callbacks for this connection */
2634 conn->connect_cfm_cb = NULL;
2635 conn->security_cfm_cb = NULL;
2636 conn->disconn_cfm_cb = NULL;
2637
2638 hci_conn_drop(conn);
2639
2640 mgmt_pending_remove(cmd);
2641 }
2642
2643 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2644 {
2645 struct pending_cmd *cmd;
2646
2647 BT_DBG("status %u", status);
2648
2649 cmd = find_pairing(conn);
2650 if (!cmd)
2651 BT_DBG("Unable to find a pending command");
2652 else
2653 pairing_complete(cmd, mgmt_status(status));
2654 }
2655
2656 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2657 {
2658 struct pending_cmd *cmd;
2659
2660 BT_DBG("status %u", status);
2661
2662 if (!status)
2663 return;
2664
2665 cmd = find_pairing(conn);
2666 if (!cmd)
2667 BT_DBG("Unable to find a pending command");
2668 else
2669 pairing_complete(cmd, mgmt_status(status));
2670 }
2671
2672 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2673 u16 len)
2674 {
2675 struct mgmt_cp_pair_device *cp = data;
2676 struct mgmt_rp_pair_device rp;
2677 struct pending_cmd *cmd;
2678 u8 sec_level, auth_type;
2679 struct hci_conn *conn;
2680 int err;
2681
2682 BT_DBG("");
2683
2684 memset(&rp, 0, sizeof(rp));
2685 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2686 rp.addr.type = cp->addr.type;
2687
2688 if (!bdaddr_type_is_valid(cp->addr.type))
2689 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2690 MGMT_STATUS_INVALID_PARAMS,
2691 &rp, sizeof(rp));
2692
2693 hci_dev_lock(hdev);
2694
2695 if (!hdev_is_powered(hdev)) {
2696 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2697 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2698 goto unlock;
2699 }
2700
2701 sec_level = BT_SECURITY_MEDIUM;
2702 if (cp->io_cap == 0x03)
2703 auth_type = HCI_AT_DEDICATED_BONDING;
2704 else
2705 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2706
2707 if (cp->addr.type == BDADDR_BREDR)
2708 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2709 cp->addr.type, sec_level, auth_type);
2710 else
2711 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2712 cp->addr.type, sec_level, auth_type);
2713
2714 if (IS_ERR(conn)) {
2715 int status;
2716
2717 if (PTR_ERR(conn) == -EBUSY)
2718 status = MGMT_STATUS_BUSY;
2719 else
2720 status = MGMT_STATUS_CONNECT_FAILED;
2721
2722 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2723 status, &rp,
2724 sizeof(rp));
2725 goto unlock;
2726 }
2727
2728 if (conn->connect_cfm_cb) {
2729 hci_conn_drop(conn);
2730 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2731 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2732 goto unlock;
2733 }
2734
2735 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2736 if (!cmd) {
2737 err = -ENOMEM;
2738 hci_conn_drop(conn);
2739 goto unlock;
2740 }
2741
2742 /* For LE, just connecting isn't proof that the pairing finished */
2743 if (cp->addr.type == BDADDR_BREDR)
2744 conn->connect_cfm_cb = pairing_complete_cb;
2745 else
2746 conn->connect_cfm_cb = le_connect_complete_cb;
2747
2748 conn->security_cfm_cb = pairing_complete_cb;
2749 conn->disconn_cfm_cb = pairing_complete_cb;
2750 conn->io_capability = cp->io_cap;
2751 cmd->user_data = conn;
2752
2753 if (conn->state == BT_CONNECTED &&
2754 hci_conn_security(conn, sec_level, auth_type))
2755 pairing_complete(cmd, 0);
2756
2757 err = 0;
2758
2759 unlock:
2760 hci_dev_unlock(hdev);
2761 return err;
2762 }
2763
2764 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2765 u16 len)
2766 {
2767 struct mgmt_addr_info *addr = data;
2768 struct pending_cmd *cmd;
2769 struct hci_conn *conn;
2770 int err;
2771
2772 BT_DBG("");
2773
2774 hci_dev_lock(hdev);
2775
2776 if (!hdev_is_powered(hdev)) {
2777 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2778 MGMT_STATUS_NOT_POWERED);
2779 goto unlock;
2780 }
2781
2782 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2783 if (!cmd) {
2784 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2785 MGMT_STATUS_INVALID_PARAMS);
2786 goto unlock;
2787 }
2788
2789 conn = cmd->user_data;
2790
2791 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2792 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 goto unlock;
2795 }
2796
2797 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2798
2799 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2800 addr, sizeof(*addr));
2801 unlock:
2802 hci_dev_unlock(hdev);
2803 return err;
2804 }
2805
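/* Common handler for the user confirmation and passkey (negative) replies.
 * LE responses are handed to SMP, BR/EDR responses are sent as the given
 * HCI command.
 */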
2806 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2807 struct mgmt_addr_info *addr, u16 mgmt_op,
2808 u16 hci_op, __le32 passkey)
2809 {
2810 struct pending_cmd *cmd;
2811 struct hci_conn *conn;
2812 int err;
2813
2814 hci_dev_lock(hdev);
2815
2816 if (!hdev_is_powered(hdev)) {
2817 err = cmd_complete(sk, hdev->id, mgmt_op,
2818 MGMT_STATUS_NOT_POWERED, addr,
2819 sizeof(*addr));
2820 goto done;
2821 }
2822
2823 if (addr->type == BDADDR_BREDR)
2824 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2825 else
2826 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2827
2828 if (!conn) {
2829 err = cmd_complete(sk, hdev->id, mgmt_op,
2830 MGMT_STATUS_NOT_CONNECTED, addr,
2831 sizeof(*addr));
2832 goto done;
2833 }
2834
2835 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2836 /* Continue with pairing via SMP */
2837 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2838
2839 if (!err)
2840 err = cmd_complete(sk, hdev->id, mgmt_op,
2841 MGMT_STATUS_SUCCESS, addr,
2842 sizeof(*addr));
2843 else
2844 err = cmd_complete(sk, hdev->id, mgmt_op,
2845 MGMT_STATUS_FAILED, addr,
2846 sizeof(*addr));
2847
2848 goto done;
2849 }
2850
2851 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2852 if (!cmd) {
2853 err = -ENOMEM;
2854 goto done;
2855 }
2856
2857 /* Continue with pairing via HCI */
2858 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2859 struct hci_cp_user_passkey_reply cp;
2860
2861 bacpy(&cp.bdaddr, &addr->bdaddr);
2862 cp.passkey = passkey;
2863 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2864 } else
2865 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2866 &addr->bdaddr);
2867
2868 if (err < 0)
2869 mgmt_pending_remove(cmd);
2870
2871 done:
2872 hci_dev_unlock(hdev);
2873 return err;
2874 }
2875
2876 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2877 void *data, u16 len)
2878 {
2879 struct mgmt_cp_pin_code_neg_reply *cp = data;
2880
2881 BT_DBG("");
2882
2883 return user_pairing_resp(sk, hdev, &cp->addr,
2884 MGMT_OP_PIN_CODE_NEG_REPLY,
2885 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2886 }
2887
2888 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2889 u16 len)
2890 {
2891 struct mgmt_cp_user_confirm_reply *cp = data;
2892
2893 BT_DBG("");
2894
2895 if (len != sizeof(*cp))
2896 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2897 MGMT_STATUS_INVALID_PARAMS);
2898
2899 return user_pairing_resp(sk, hdev, &cp->addr,
2900 MGMT_OP_USER_CONFIRM_REPLY,
2901 HCI_OP_USER_CONFIRM_REPLY, 0);
2902 }
2903
2904 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2905 void *data, u16 len)
2906 {
2907 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2908
2909 BT_DBG("");
2910
2911 return user_pairing_resp(sk, hdev, &cp->addr,
2912 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2913 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2914 }
2915
2916 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2917 u16 len)
2918 {
2919 struct mgmt_cp_user_passkey_reply *cp = data;
2920
2921 BT_DBG("");
2922
2923 return user_pairing_resp(sk, hdev, &cp->addr,
2924 MGMT_OP_USER_PASSKEY_REPLY,
2925 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2926 }
2927
2928 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2929 void *data, u16 len)
2930 {
2931 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2932
2933 BT_DBG("");
2934
2935 return user_pairing_resp(sk, hdev, &cp->addr,
2936 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2937 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2938 }
2939
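/* Queue an HCI Write Local Name command carrying the current device name. */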
2940 static void update_name(struct hci_request *req)
2941 {
2942 struct hci_dev *hdev = req->hdev;
2943 struct hci_cp_write_local_name cp;
2944
2945 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2946
2947 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2948 }
2949
2950 static void set_name_complete(struct hci_dev *hdev, u8 status)
2951 {
2952 struct mgmt_cp_set_local_name *cp;
2953 struct pending_cmd *cmd;
2954
2955 BT_DBG("status 0x%02x", status);
2956
2957 hci_dev_lock(hdev);
2958
2959 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2960 if (!cmd)
2961 goto unlock;
2962
2963 cp = cmd->param;
2964
2965 if (status)
2966 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2967 mgmt_status(status));
2968 else
2969 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2970 cp, sizeof(*cp));
2971
2972 mgmt_pending_remove(cmd);
2973
2974 unlock:
2975 hci_dev_unlock(hdev);
2976 }
2977
2978 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2979 u16 len)
2980 {
2981 struct mgmt_cp_set_local_name *cp = data;
2982 struct pending_cmd *cmd;
2983 struct hci_request req;
2984 int err;
2985
2986 BT_DBG("");
2987
2988 hci_dev_lock(hdev);
2989
2990 /* If the old values are the same as the new ones, just return a
2991 * direct command complete event.
2992 */
2993 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2994 !memcmp(hdev->short_name, cp->short_name,
2995 sizeof(hdev->short_name))) {
2996 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2997 data, len);
2998 goto failed;
2999 }
3000
3001 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3002
3003 if (!hdev_is_powered(hdev)) {
3004 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3005
3006 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3007 data, len);
3008 if (err < 0)
3009 goto failed;
3010
3011 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3012 sk);
3013
3014 goto failed;
3015 }
3016
3017 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3018 if (!cmd) {
3019 err = -ENOMEM;
3020 goto failed;
3021 }
3022
3023 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3024
3025 hci_req_init(&req, hdev);
3026
3027 if (lmp_bredr_capable(hdev)) {
3028 update_name(&req);
3029 update_eir(&req);
3030 }
3031
3032 /* The name is stored in the scan response data and so
3033 * no need to update the advertising data here.
3034 */
3035 if (lmp_le_capable(hdev))
3036 update_scan_rsp_data(&req);
3037
3038 err = hci_req_run(&req, set_name_complete);
3039 if (err < 0)
3040 mgmt_pending_remove(cmd);
3041
3042 failed:
3043 hci_dev_unlock(hdev);
3044 return err;
3045 }
3046
3047 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3048 void *data, u16 data_len)
3049 {
3050 struct pending_cmd *cmd;
3051 int err;
3052
3053 BT_DBG("%s", hdev->name);
3054
3055 hci_dev_lock(hdev);
3056
3057 if (!hdev_is_powered(hdev)) {
3058 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3059 MGMT_STATUS_NOT_POWERED);
3060 goto unlock;
3061 }
3062
3063 if (!lmp_ssp_capable(hdev)) {
3064 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3065 MGMT_STATUS_NOT_SUPPORTED);
3066 goto unlock;
3067 }
3068
3069 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3070 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3071 MGMT_STATUS_BUSY);
3072 goto unlock;
3073 }
3074
3075 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3076 if (!cmd) {
3077 err = -ENOMEM;
3078 goto unlock;
3079 }
3080
3081 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3082 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3083 0, NULL);
3084 else
3085 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3086
3087 if (err < 0)
3088 mgmt_pending_remove(cmd);
3089
3090 unlock:
3091 hci_dev_unlock(hdev);
3092 return err;
3093 }
3094
3095 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3096 void *data, u16 len)
3097 {
3098 struct mgmt_cp_add_remote_oob_data *cp = data;
3099 u8 status;
3100 int err;
3101
3102 BT_DBG("%s", hdev->name);
3103
3104 hci_dev_lock(hdev);
3105
3106 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
3107 cp->randomizer);
3108 if (err < 0)
3109 status = MGMT_STATUS_FAILED;
3110 else
3111 status = MGMT_STATUS_SUCCESS;
3112
3113 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3114 &cp->addr, sizeof(cp->addr));
3115
3116 hci_dev_unlock(hdev);
3117 return err;
3118 }
3119
3120 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3121 void *data, u16 len)
3122 {
3123 struct mgmt_cp_remove_remote_oob_data *cp = data;
3124 u8 status;
3125 int err;
3126
3127 BT_DBG("%s", hdev->name);
3128
3129 hci_dev_lock(hdev);
3130
3131 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3132 if (err < 0)
3133 status = MGMT_STATUS_INVALID_PARAMS;
3134 else
3135 status = MGMT_STATUS_SUCCESS;
3136
3137 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3138 status, &cp->addr, sizeof(cp->addr));
3139
3140 hci_dev_unlock(hdev);
3141 return err;
3142 }
3143
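/* Fail the pending Start Discovery command and reset the discovery state. */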
3144 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3145 {
3146 struct pending_cmd *cmd;
3147 u8 type;
3148 int err;
3149
3150 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3151
3152 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3153 if (!cmd)
3154 return -ENOENT;
3155
3156 type = hdev->discovery.type;
3157
3158 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3159 &type, sizeof(type));
3160 mgmt_pending_remove(cmd);
3161
3162 return err;
3163 }
3164
3165 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3166 {
3167 BT_DBG("status %d", status);
3168
3169 if (status) {
3170 hci_dev_lock(hdev);
3171 mgmt_start_discovery_failed(hdev, status);
3172 hci_dev_unlock(hdev);
3173 return;
3174 }
3175
3176 hci_dev_lock(hdev);
3177 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3178 hci_dev_unlock(hdev);
3179
3180 switch (hdev->discovery.type) {
3181 case DISCOV_TYPE_LE:
3182 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3183 DISCOV_LE_TIMEOUT);
3184 break;
3185
3186 case DISCOV_TYPE_INTERLEAVED:
3187 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3188 DISCOV_INTERLEAVED_TIMEOUT);
3189 break;
3190
3191 case DISCOV_TYPE_BREDR:
3192 break;
3193
3194 default:
3195 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3196 }
3197 }
3198
3199 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3200 void *data, u16 len)
3201 {
3202 struct mgmt_cp_start_discovery *cp = data;
3203 struct pending_cmd *cmd;
3204 struct hci_cp_le_set_scan_param param_cp;
3205 struct hci_cp_le_set_scan_enable enable_cp;
3206 struct hci_cp_inquiry inq_cp;
3207 struct hci_request req;
3208 /* General inquiry access code (GIAC) */
3209 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3210 u8 status;
3211 int err;
3212
3213 BT_DBG("%s", hdev->name);
3214
3215 hci_dev_lock(hdev);
3216
3217 if (!hdev_is_powered(hdev)) {
3218 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3219 MGMT_STATUS_NOT_POWERED);
3220 goto failed;
3221 }
3222
3223 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3224 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3225 MGMT_STATUS_BUSY);
3226 goto failed;
3227 }
3228
3229 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3230 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3231 MGMT_STATUS_BUSY);
3232 goto failed;
3233 }
3234
3235 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3236 if (!cmd) {
3237 err = -ENOMEM;
3238 goto failed;
3239 }
3240
3241 hdev->discovery.type = cp->type;
3242
3243 hci_req_init(&req, hdev);
3244
3245 switch (hdev->discovery.type) {
3246 case DISCOV_TYPE_BREDR:
3247 status = mgmt_bredr_support(hdev);
3248 if (status) {
3249 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3250 status);
3251 mgmt_pending_remove(cmd);
3252 goto failed;
3253 }
3254
3255 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3256 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3257 MGMT_STATUS_BUSY);
3258 mgmt_pending_remove(cmd);
3259 goto failed;
3260 }
3261
3262 hci_inquiry_cache_flush(hdev);
3263
3264 memset(&inq_cp, 0, sizeof(inq_cp));
3265 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3266 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3267 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3268 break;
3269
3270 case DISCOV_TYPE_LE:
3271 case DISCOV_TYPE_INTERLEAVED:
3272 status = mgmt_le_support(hdev);
3273 if (status) {
3274 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3275 status);
3276 mgmt_pending_remove(cmd);
3277 goto failed;
3278 }
3279
3280 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3281 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3282 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3283 MGMT_STATUS_NOT_SUPPORTED);
3284 mgmt_pending_remove(cmd);
3285 goto failed;
3286 }
3287
3288 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3289 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3290 MGMT_STATUS_REJECTED);
3291 mgmt_pending_remove(cmd);
3292 goto failed;
3293 }
3294
3295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3296 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3297 MGMT_STATUS_BUSY);
3298 mgmt_pending_remove(cmd);
3299 goto failed;
3300 }
3301
3302 memset(&param_cp, 0, sizeof(param_cp));
3303 param_cp.type = LE_SCAN_ACTIVE;
3304 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3305 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3306 param_cp.own_address_type = hdev->own_addr_type;
3307 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3308 &param_cp);
3309
3310 memset(&enable_cp, 0, sizeof(enable_cp));
3311 enable_cp.enable = LE_SCAN_ENABLE;
3312 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3313 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3314 &enable_cp);
3315 break;
3316
3317 default:
3318 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3319 MGMT_STATUS_INVALID_PARAMS);
3320 mgmt_pending_remove(cmd);
3321 goto failed;
3322 }
3323
3324 err = hci_req_run(&req, start_discovery_complete);
3325 if (err < 0)
3326 mgmt_pending_remove(cmd);
3327 else
3328 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3329
3330 failed:
3331 hci_dev_unlock(hdev);
3332 return err;
3333 }
3334
3335 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3336 {
3337 struct pending_cmd *cmd;
3338 int err;
3339
3340 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3341 if (!cmd)
3342 return -ENOENT;
3343
3344 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3345 &hdev->discovery.type, sizeof(hdev->discovery.type));
3346 mgmt_pending_remove(cmd);
3347
3348 return err;
3349 }
3350
3351 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3352 {
3353 BT_DBG("status %d", status);
3354
3355 hci_dev_lock(hdev);
3356
3357 if (status) {
3358 mgmt_stop_discovery_failed(hdev, status);
3359 goto unlock;
3360 }
3361
3362 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3363
3364 unlock:
3365 hci_dev_unlock(hdev);
3366 }
3367
3368 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3369 u16 len)
3370 {
3371 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3372 struct pending_cmd *cmd;
3373 struct hci_cp_remote_name_req_cancel cp;
3374 struct inquiry_entry *e;
3375 struct hci_request req;
3376 struct hci_cp_le_set_scan_enable enable_cp;
3377 int err;
3378
3379 BT_DBG("%s", hdev->name);
3380
3381 hci_dev_lock(hdev);
3382
3383 if (!hci_discovery_active(hdev)) {
3384 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3385 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3386 sizeof(mgmt_cp->type));
3387 goto unlock;
3388 }
3389
3390 if (hdev->discovery.type != mgmt_cp->type) {
3391 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3392 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3393 sizeof(mgmt_cp->type));
3394 goto unlock;
3395 }
3396
3397 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3398 if (!cmd) {
3399 err = -ENOMEM;
3400 goto unlock;
3401 }
3402
3403 hci_req_init(&req, hdev);
3404
3405 switch (hdev->discovery.state) {
3406 case DISCOVERY_FINDING:
3407 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3408 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3409 } else {
3410 cancel_delayed_work(&hdev->le_scan_disable);
3411
3412 memset(&enable_cp, 0, sizeof(enable_cp));
3413 enable_cp.enable = LE_SCAN_DISABLE;
3414 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3415 sizeof(enable_cp), &enable_cp);
3416 }
3417
3418 break;
3419
3420 case DISCOVERY_RESOLVING:
3421 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3422 NAME_PENDING);
3423 if (!e) {
3424 mgmt_pending_remove(cmd);
3425 err = cmd_complete(sk, hdev->id,
3426 MGMT_OP_STOP_DISCOVERY, 0,
3427 &mgmt_cp->type,
3428 sizeof(mgmt_cp->type));
3429 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3430 goto unlock;
3431 }
3432
3433 bacpy(&cp.bdaddr, &e->data.bdaddr);
3434 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3435 &cp);
3436
3437 break;
3438
3439 default:
3440 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3441
3442 mgmt_pending_remove(cmd);
3443 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3444 MGMT_STATUS_FAILED, &mgmt_cp->type,
3445 sizeof(mgmt_cp->type));
3446 goto unlock;
3447 }
3448
3449 err = hci_req_run(&req, stop_discovery_complete);
3450 if (err < 0)
3451 mgmt_pending_remove(cmd);
3452 else
3453 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3454
3455 unlock:
3456 hci_dev_unlock(hdev);
3457 return err;
3458 }
3459
3460 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3461 u16 len)
3462 {
3463 struct mgmt_cp_confirm_name *cp = data;
3464 struct inquiry_entry *e;
3465 int err;
3466
3467 BT_DBG("%s", hdev->name);
3468
3469 hci_dev_lock(hdev);
3470
3471 if (!hci_discovery_active(hdev)) {
3472 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3473 MGMT_STATUS_FAILED);
3474 goto failed;
3475 }
3476
3477 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3478 if (!e) {
3479 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3480 MGMT_STATUS_INVALID_PARAMS);
3481 goto failed;
3482 }
3483
3484 if (cp->name_known) {
3485 e->name_state = NAME_KNOWN;
3486 list_del(&e->list);
3487 } else {
3488 e->name_state = NAME_NEEDED;
3489 hci_inquiry_cache_update_resolve(hdev, e);
3490 }
3491
3492 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3493 sizeof(cp->addr));
3494
3495 failed:
3496 hci_dev_unlock(hdev);
3497 return err;
3498 }
3499
3500 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3501 u16 len)
3502 {
3503 struct mgmt_cp_block_device *cp = data;
3504 u8 status;
3505 int err;
3506
3507 BT_DBG("%s", hdev->name);
3508
3509 if (!bdaddr_type_is_valid(cp->addr.type))
3510 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3511 MGMT_STATUS_INVALID_PARAMS,
3512 &cp->addr, sizeof(cp->addr));
3513
3514 hci_dev_lock(hdev);
3515
3516 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3517 if (err < 0)
3518 status = MGMT_STATUS_FAILED;
3519 else
3520 status = MGMT_STATUS_SUCCESS;
3521
3522 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3523 &cp->addr, sizeof(cp->addr));
3524
3525 hci_dev_unlock(hdev);
3526
3527 return err;
3528 }
3529
3530 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3531 u16 len)
3532 {
3533 struct mgmt_cp_unblock_device *cp = data;
3534 u8 status;
3535 int err;
3536
3537 BT_DBG("%s", hdev->name);
3538
3539 if (!bdaddr_type_is_valid(cp->addr.type))
3540 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3541 MGMT_STATUS_INVALID_PARAMS,
3542 &cp->addr, sizeof(cp->addr));
3543
3544 hci_dev_lock(hdev);
3545
3546 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3547 if (err < 0)
3548 status = MGMT_STATUS_INVALID_PARAMS;
3549 else
3550 status = MGMT_STATUS_SUCCESS;
3551
3552 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3553 &cp->addr, sizeof(cp->addr));
3554
3555 hci_dev_unlock(hdev);
3556
3557 return err;
3558 }
3559
3560 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3561 u16 len)
3562 {
3563 struct mgmt_cp_set_device_id *cp = data;
3564 struct hci_request req;
3565 int err;
3566 __u16 source;
3567
3568 BT_DBG("%s", hdev->name);
3569
3570 source = __le16_to_cpu(cp->source);
3571
3572 if (source > 0x0002)
3573 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3574 MGMT_STATUS_INVALID_PARAMS);
3575
3576 hci_dev_lock(hdev);
3577
3578 hdev->devid_source = source;
3579 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3580 hdev->devid_product = __le16_to_cpu(cp->product);
3581 hdev->devid_version = __le16_to_cpu(cp->version);
3582
3583 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3584
3585 hci_req_init(&req, hdev);
3586 update_eir(&req);
3587 hci_req_run(&req, NULL);
3588
3589 hci_dev_unlock(hdev);
3590
3591 return err;
3592 }
3593
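/* Report the result to all pending Set Advertising commands and emit New
 * Settings on success.
 */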
3594 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3595 {
3596 struct cmd_lookup match = { NULL, hdev };
3597
3598 if (status) {
3599 u8 mgmt_err = mgmt_status(status);
3600
3601 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3602 cmd_status_rsp, &mgmt_err);
3603 return;
3604 }
3605
3606 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3607 &match);
3608
3609 new_settings(hdev, match.sk);
3610
3611 if (match.sk)
3612 sock_put(match.sk);
3613 }
3614
3615 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3616 u16 len)
3617 {
3618 struct mgmt_mode *cp = data;
3619 struct pending_cmd *cmd;
3620 struct hci_request req;
3621 u8 val, enabled, status;
3622 int err;
3623
3624 BT_DBG("request for %s", hdev->name);
3625
3626 status = mgmt_le_support(hdev);
3627 if (status)
3628 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3629 status);
3630
3631 if (cp->val != 0x00 && cp->val != 0x01)
3632 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3633 MGMT_STATUS_INVALID_PARAMS);
3634
3635 hci_dev_lock(hdev);
3636
3637 val = !!cp->val;
3638 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3639
3640 /* The following conditions mean that we should not do any
3641 * HCI communication but directly send a mgmt response to
3642 * user space (after toggling the flag if
3643 * necessary).
3644 */
3645 if (!hdev_is_powered(hdev) || val == enabled ||
3646 hci_conn_num(hdev, LE_LINK) > 0) {
3647 bool changed = false;
3648
3649 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3650 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3651 changed = true;
3652 }
3653
3654 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3655 if (err < 0)
3656 goto unlock;
3657
3658 if (changed)
3659 err = new_settings(hdev, sk);
3660
3661 goto unlock;
3662 }
3663
3664 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3665 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3666 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3667 MGMT_STATUS_BUSY);
3668 goto unlock;
3669 }
3670
3671 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3672 if (!cmd) {
3673 err = -ENOMEM;
3674 goto unlock;
3675 }
3676
3677 hci_req_init(&req, hdev);
3678
3679 if (val)
3680 enable_advertising(&req);
3681 else
3682 disable_advertising(&req);
3683
3684 err = hci_req_run(&req, set_advertising_complete);
3685 if (err < 0)
3686 mgmt_pending_remove(cmd);
3687
3688 unlock:
3689 hci_dev_unlock(hdev);
3690 return err;
3691 }
3692
3693 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3694 void *data, u16 len)
3695 {
3696 struct mgmt_cp_set_static_address *cp = data;
3697 int err;
3698
3699 BT_DBG("%s", hdev->name);
3700
3701 if (!lmp_le_capable(hdev))
3702 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3703 MGMT_STATUS_NOT_SUPPORTED);
3704
3705 if (hdev_is_powered(hdev))
3706 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3707 MGMT_STATUS_REJECTED);
3708
3709 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3710 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3711 return cmd_status(sk, hdev->id,
3712 MGMT_OP_SET_STATIC_ADDRESS,
3713 MGMT_STATUS_INVALID_PARAMS);
3714
3715 /* Two most significant bits shall be set */
3716 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3717 return cmd_status(sk, hdev->id,
3718 MGMT_OP_SET_STATIC_ADDRESS,
3719 MGMT_STATUS_INVALID_PARAMS);
3720 }
3721
3722 hci_dev_lock(hdev);
3723
3724 bacpy(&hdev->static_addr, &cp->bdaddr);
3725
3726 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3727
3728 hci_dev_unlock(hdev);
3729
3730 return err;
3731 }
3732
3733 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3734 void *data, u16 len)
3735 {
3736 struct mgmt_cp_set_scan_params *cp = data;
3737 __u16 interval, window;
3738 int err;
3739
3740 BT_DBG("%s", hdev->name);
3741
3742 if (!lmp_le_capable(hdev))
3743 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3744 MGMT_STATUS_NOT_SUPPORTED);
3745
3746 interval = __le16_to_cpu(cp->interval);
3747
3748 if (interval < 0x0004 || interval > 0x4000)
3749 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3750 MGMT_STATUS_INVALID_PARAMS);
3751
3752 window = __le16_to_cpu(cp->window);
3753
3754 if (window < 0x0004 || window > 0x4000)
3755 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3756 MGMT_STATUS_INVALID_PARAMS);
3757
3758 if (window > interval)
3759 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3760 MGMT_STATUS_INVALID_PARAMS);
3761
3762 hci_dev_lock(hdev);
3763
3764 hdev->le_scan_interval = interval;
3765 hdev->le_scan_window = window;
3766
3767 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3768
3769 hci_dev_unlock(hdev);
3770
3771 return err;
3772 }
3773
3774 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3775 {
3776 struct pending_cmd *cmd;
3777
3778 BT_DBG("status 0x%02x", status);
3779
3780 hci_dev_lock(hdev);
3781
3782 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3783 if (!cmd)
3784 goto unlock;
3785
3786 if (status) {
3787 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3788 mgmt_status(status));
3789 } else {
3790 struct mgmt_mode *cp = cmd->param;
3791
3792 if (cp->val)
3793 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3794 else
3795 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3796
3797 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3798 new_settings(hdev, cmd->sk);
3799 }
3800
3801 mgmt_pending_remove(cmd);
3802
3803 unlock:
3804 hci_dev_unlock(hdev);
3805 }
3806
3807 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3808 void *data, u16 len)
3809 {
3810 struct mgmt_mode *cp = data;
3811 struct pending_cmd *cmd;
3812 struct hci_request req;
3813 int err;
3814
3815 BT_DBG("%s", hdev->name);
3816
3817 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3818 hdev->hci_ver < BLUETOOTH_VER_1_2)
3819 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3820 MGMT_STATUS_NOT_SUPPORTED);
3821
3822 if (cp->val != 0x00 && cp->val != 0x01)
3823 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3824 MGMT_STATUS_INVALID_PARAMS);
3825
3826 if (!hdev_is_powered(hdev))
3827 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3828 MGMT_STATUS_NOT_POWERED);
3829
3830 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3831 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3832 MGMT_STATUS_REJECTED);
3833
3834 hci_dev_lock(hdev);
3835
3836 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3837 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3838 MGMT_STATUS_BUSY);
3839 goto unlock;
3840 }
3841
3842 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3843 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3844 hdev);
3845 goto unlock;
3846 }
3847
3848 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3849 data, len);
3850 if (!cmd) {
3851 err = -ENOMEM;
3852 goto unlock;
3853 }
3854
3855 hci_req_init(&req, hdev);
3856
3857 write_fast_connectable(&req, cp->val);
3858
3859 err = hci_req_run(&req, fast_connectable_complete);
3860 if (err < 0) {
3861 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3862 MGMT_STATUS_FAILED);
3863 mgmt_pending_remove(cmd);
3864 }
3865
3866 unlock:
3867 hci_dev_unlock(hdev);
3868
3869 return err;
3870 }
3871
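/* Program page and inquiry scan according to the connectable and
 * discoverable settings, with fast connectable mode turned off first.
 */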
3872 static void set_bredr_scan(struct hci_request *req)
3873 {
3874 struct hci_dev *hdev = req->hdev;
3875 u8 scan = 0;
3876
3877 /* Ensure that fast connectable is disabled. This function will
3878 * not do anything if the page scan parameters are already what
3879 * they should be.
3880 */
3881 write_fast_connectable(req, false);
3882
3883 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3884 scan |= SCAN_PAGE;
3885 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3886 scan |= SCAN_INQUIRY;
3887
3888 if (scan)
3889 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3890 }
3891
3892 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3893 {
3894 struct pending_cmd *cmd;
3895
3896 BT_DBG("status 0x%02x", status);
3897
3898 hci_dev_lock(hdev);
3899
3900 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3901 if (!cmd)
3902 goto unlock;
3903
3904 if (status) {
3905 u8 mgmt_err = mgmt_status(status);
3906
3907 /* We need to restore the flag if related HCI commands
3908 * failed.
3909 */
3910 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3911
3912 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3913 } else {
3914 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3915 new_settings(hdev, cmd->sk);
3916 }
3917
3918 mgmt_pending_remove(cmd);
3919
3920 unlock:
3921 hci_dev_unlock(hdev);
3922 }
3923
3924 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3925 {
3926 struct mgmt_mode *cp = data;
3927 struct pending_cmd *cmd;
3928 struct hci_request req;
3929 int err;
3930
3931 BT_DBG("request for %s", hdev->name);
3932
3933 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3934 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3935 MGMT_STATUS_NOT_SUPPORTED);
3936
3937 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3938 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3939 MGMT_STATUS_REJECTED);
3940
3941 if (cp->val != 0x00 && cp->val != 0x01)
3942 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3943 MGMT_STATUS_INVALID_PARAMS);
3944
3945 hci_dev_lock(hdev);
3946
3947 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3948 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3949 goto unlock;
3950 }
3951
3952 if (!hdev_is_powered(hdev)) {
3953 if (!cp->val) {
3954 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
3955 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
3956 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3957 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3958 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
3959 }
3960
3961 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3962
3963 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
3964 if (err < 0)
3965 goto unlock;
3966
3967 err = new_settings(hdev, sk);
3968 goto unlock;
3969 }
3970
3971 /* Reject disabling when powered on */
3972 if (!cp->val) {
3973 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3974 MGMT_STATUS_REJECTED);
3975 goto unlock;
3976 }
3977
3978 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
3979 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3980 MGMT_STATUS_BUSY);
3981 goto unlock;
3982 }
3983
3984 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
3985 if (!cmd) {
3986 err = -ENOMEM;
3987 goto unlock;
3988 }
3989
3990 /* We need to flip the bit already here so that update_adv_data
3991 * generates the correct flags.
3992 */
3993 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3994
3995 hci_req_init(&req, hdev);
3996
3997 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3998 set_bredr_scan(&req);
3999
4000 /* Since only the advertising data flags will change, there
4001 * is no need to update the scan response data.
4002 */
4003 update_adv_data(&req);
4004
4005 err = hci_req_run(&req, set_bredr_complete);
4006 if (err < 0)
4007 mgmt_pending_remove(cmd);
4008
4009 unlock:
4010 hci_dev_unlock(hdev);
4011 return err;
4012 }
4013
4014 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4015 void *data, u16 len)
4016 {
4017 struct mgmt_mode *cp = data;
4018 struct pending_cmd *cmd;
4019 u8 status;
4020 int err;
4021
4022 BT_DBG("request for %s", hdev->name);
4023
4024 status = mgmt_bredr_support(hdev);
4025 if (status)
4026 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4027 status);
4028
4029 if (!lmp_sc_capable(hdev))
4030 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4031 MGMT_STATUS_NOT_SUPPORTED);
4032
4033 if (cp->val != 0x00 && cp->val != 0x01)
4034 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4035 MGMT_STATUS_INVALID_PARAMS);
4036
4037 hci_dev_lock(hdev);
4038
4039 if (!hdev_is_powered(hdev)) {
4040 bool changed;
4041
4042 if (cp->val)
4043 changed = !test_and_set_bit(HCI_SC_ENABLED,
4044 &hdev->dev_flags);
4045 else
4046 changed = test_and_clear_bit(HCI_SC_ENABLED,
4047 &hdev->dev_flags);
4048
4049 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4050 if (err < 0)
4051 goto failed;
4052
4053 if (changed)
4054 err = new_settings(hdev, sk);
4055
4056 goto failed;
4057 }
4058
4059 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4060 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4061 MGMT_STATUS_BUSY);
4062 goto failed;
4063 }
4064
4065 if (!!cp->val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
4066 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4067 goto failed;
4068 }
4069
4070 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4071 if (!cmd) {
4072 err = -ENOMEM;
4073 goto failed;
4074 }
4075
4076 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &cp->val);
4077 if (err < 0) {
4078 mgmt_pending_remove(cmd);
4079 goto failed;
4080 }
4081
4082 failed:
4083 hci_dev_unlock(hdev);
4084 return err;
4085 }
4086
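/* Sanity check the flags and address type of a Long Term Key entry
 * supplied by user space.
 */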
4087 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4088 {
4089 if (key->authenticated != 0x00 && key->authenticated != 0x01)
4090 return false;
4091 if (key->master != 0x00 && key->master != 0x01)
4092 return false;
4093 if (!bdaddr_type_is_le(key->addr.type))
4094 return false;
4095 return true;
4096 }
4097
4098 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4099 void *cp_data, u16 len)
4100 {
4101 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4102 u16 key_count, expected_len;
4103 int i, err;
4104
4105 BT_DBG("request for %s", hdev->name);
4106
4107 if (!lmp_le_capable(hdev))
4108 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4109 MGMT_STATUS_NOT_SUPPORTED);
4110
4111 key_count = __le16_to_cpu(cp->key_count);
4112
4113 expected_len = sizeof(*cp) + key_count *
4114 sizeof(struct mgmt_ltk_info);
4115 if (expected_len != len) {
4116 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4117 expected_len, len);
4118 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4119 MGMT_STATUS_INVALID_PARAMS);
4120 }
4121
4122 BT_DBG("%s key_count %u", hdev->name, key_count);
4123
4124 for (i = 0; i < key_count; i++) {
4125 struct mgmt_ltk_info *key = &cp->keys[i];
4126
4127 if (!ltk_is_valid(key))
4128 return cmd_status(sk, hdev->id,
4129 MGMT_OP_LOAD_LONG_TERM_KEYS,
4130 MGMT_STATUS_INVALID_PARAMS);
4131 }
4132
4133 hci_dev_lock(hdev);
4134
4135 hci_smp_ltks_clear(hdev);
4136
4137 for (i = 0; i < key_count; i++) {
4138 struct mgmt_ltk_info *key = &cp->keys[i];
4139 u8 type, addr_type;
4140
4141 if (key->addr.type == BDADDR_LE_PUBLIC)
4142 addr_type = ADDR_LE_DEV_PUBLIC;
4143 else
4144 addr_type = ADDR_LE_DEV_RANDOM;
4145
4146 if (key->master)
4147 type = HCI_SMP_LTK;
4148 else
4149 type = HCI_SMP_LTK_SLAVE;
4150
4151 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4152 type, 0, key->authenticated, key->val,
4153 key->enc_size, key->ediv, key->rand);
4154 }
4155
4156 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4157 NULL, 0);
4158
4159 hci_dev_unlock(hdev);
4160
4161 return err;
4162 }
4163
4164 static const struct mgmt_handler {
4165 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4166 u16 data_len);
4167 bool var_len;
4168 size_t data_len;
4169 } mgmt_handlers[] = {
4170 { NULL }, /* 0x0000 (no command) */
4171 { read_version, false, MGMT_READ_VERSION_SIZE },
4172 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4173 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4174 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4175 { set_powered, false, MGMT_SETTING_SIZE },
4176 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4177 { set_connectable, false, MGMT_SETTING_SIZE },
4178 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4179 { set_pairable, false, MGMT_SETTING_SIZE },
4180 { set_link_security, false, MGMT_SETTING_SIZE },
4181 { set_ssp, false, MGMT_SETTING_SIZE },
4182 { set_hs, false, MGMT_SETTING_SIZE },
4183 { set_le, false, MGMT_SETTING_SIZE },
4184 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4185 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4186 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4187 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4188 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4189 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4190 { disconnect, false, MGMT_DISCONNECT_SIZE },
4191 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4192 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4193 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4194 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4195 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4196 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4197 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4198 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4199 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4200 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4201 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4202 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4203 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4204 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4205 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4206 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4207 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4208 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4209 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4210 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4211 { set_advertising, false, MGMT_SETTING_SIZE },
4212 { set_bredr, false, MGMT_SETTING_SIZE },
4213 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4214 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4215 { set_secure_conn, false, MGMT_SETTING_SIZE },
4216 };
4217
4218
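/* Entry point for mgmt commands received on the HCI control channel:
 * validate the header, resolve the controller index and dispatch to the
 * matching handler.
 */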
4219 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4220 {
4221 void *buf;
4222 u8 *cp;
4223 struct mgmt_hdr *hdr;
4224 u16 opcode, index, len;
4225 struct hci_dev *hdev = NULL;
4226 const struct mgmt_handler *handler;
4227 int err;
4228
4229 BT_DBG("got %zu bytes", msglen);
4230
4231 if (msglen < sizeof(*hdr))
4232 return -EINVAL;
4233
4234 buf = kmalloc(msglen, GFP_KERNEL);
4235 if (!buf)
4236 return -ENOMEM;
4237
4238 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4239 err = -EFAULT;
4240 goto done;
4241 }
4242
4243 hdr = buf;
4244 opcode = __le16_to_cpu(hdr->opcode);
4245 index = __le16_to_cpu(hdr->index);
4246 len = __le16_to_cpu(hdr->len);
4247
4248 if (len != msglen - sizeof(*hdr)) {
4249 err = -EINVAL;
4250 goto done;
4251 }
4252
4253 if (index != MGMT_INDEX_NONE) {
4254 hdev = hci_dev_get(index);
4255 if (!hdev) {
4256 err = cmd_status(sk, index, opcode,
4257 MGMT_STATUS_INVALID_INDEX);
4258 goto done;
4259 }
4260
4261 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4262 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4263 err = cmd_status(sk, index, opcode,
4264 MGMT_STATUS_INVALID_INDEX);
4265 goto done;
4266 }
4267 }
4268
4269 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4270 mgmt_handlers[opcode].func == NULL) {
4271 BT_DBG("Unknown op %u", opcode);
4272 err = cmd_status(sk, index, opcode,
4273 MGMT_STATUS_UNKNOWN_COMMAND);
4274 goto done;
4275 }
4276
4277 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4278 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4279 err = cmd_status(sk, index, opcode,
4280 MGMT_STATUS_INVALID_INDEX);
4281 goto done;
4282 }
4283
4284 handler = &mgmt_handlers[opcode];
4285
4286 if ((handler->var_len && len < handler->data_len) ||
4287 (!handler->var_len && len != handler->data_len)) {
4288 err = cmd_status(sk, index, opcode,
4289 MGMT_STATUS_INVALID_PARAMS);
4290 goto done;
4291 }
4292
4293 if (hdev)
4294 mgmt_init_hdev(sk, hdev);
4295
4296 cp = buf + sizeof(*hdr);
4297
4298 err = handler->func(sk, hdev, cp, len);
4299 if (err < 0)
4300 goto done;
4301
4302 err = msglen;
4303
4304 done:
4305 if (hdev)
4306 hci_dev_put(hdev);
4307
4308 kfree(buf);
4309 return err;
4310 }
4311
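/* Announce a newly registered BR/EDR controller index to management
 * sockets.
 */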
4312 void mgmt_index_added(struct hci_dev *hdev)
4313 {
4314 if (hdev->dev_type != HCI_BREDR)
4315 return;
4316
4317 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4318 }
4319
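/* Fail all pending commands with "invalid index" and announce the
 * removal of a BR/EDR controller index to management sockets.
 */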
4320 void mgmt_index_removed(struct hci_dev *hdev)
4321 {
4322 u8 status = MGMT_STATUS_INVALID_INDEX;
4323
4324 if (hdev->dev_type != HCI_BREDR)
4325 return;
4326
4327 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4328
4329 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4330 }
4331
4332 static void powered_complete(struct hci_dev *hdev, u8 status)
4333 {
4334 struct cmd_lookup match = { NULL, hdev };
4335
4336 BT_DBG("status 0x%02x", status);
4337
4338 hci_dev_lock(hdev);
4339
4340 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4341
4342 new_settings(hdev, match.sk);
4343
4344 hci_dev_unlock(hdev);
4345
4346 if (match.sk)
4347 sock_put(match.sk);
4348 }
4349
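/* Build and run an HCI request that brings the controller in line
 * with the current mgmt settings after power-on: SSP mode, LE host
 * support, static address, advertising data, link security, scan
 * mode, class of device, local name and EIR.
 */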
4350 static int powered_update_hci(struct hci_dev *hdev)
4351 {
4352 struct hci_request req;
4353 u8 link_sec;
4354
4355 hci_req_init(&req, hdev);
4356
4357 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4358 !lmp_host_ssp_capable(hdev)) {
4359 u8 ssp = 1;
4360
4361 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4362 }
4363
4364 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4365 lmp_bredr_capable(hdev)) {
4366 struct hci_cp_write_le_host_supported cp;
4367
4368 cp.le = 1;
4369 cp.simul = lmp_le_br_capable(hdev);
4370
4371 /* Check first if we already have the right
4372 * host state (host features set)
4373 */
4374 if (cp.le != lmp_host_le_capable(hdev) ||
4375 cp.simul != lmp_host_le_br_capable(hdev))
4376 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4377 sizeof(cp), &cp);
4378 }
4379
4380 if (lmp_le_capable(hdev)) {
4381 /* Set random address to static address if configured */
4382 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4383 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4384 &hdev->static_addr);
4385
4386 /* Make sure the controller has a good default for
4387 * advertising data. This also applies to the case
4388 * where BR/EDR was toggled during the AUTO_OFF phase.
4389 */
4390 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4391 update_adv_data(&req);
4392 update_scan_rsp_data(&req);
4393 }
4394
4395 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4396 enable_advertising(&req);
4397 }
4398
4399 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4400 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4401 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4402 sizeof(link_sec), &link_sec);
4403
4404 if (lmp_bredr_capable(hdev)) {
4405 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4406 set_bredr_scan(&req);
4407 update_class(&req);
4408 update_name(&req);
4409 update_eir(&req);
4410 }
4411
4412 return hci_req_run(&req, powered_complete);
4413 }
4414
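/* Handle a controller power state change. On power-on the HCI state
 * is synchronized via powered_update_hci(), with new settings
 * normally sent from its completion handler; on power-off all
 * pending commands are failed, a zeroed class of device is reported
 * if needed and the new settings are emitted directly.
 */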
4415 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4416 {
4417 struct cmd_lookup match = { NULL, hdev };
4418 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4419 u8 zero_cod[] = { 0, 0, 0 };
4420 int err;
4421
4422 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4423 return 0;
4424
4425 if (powered) {
4426 if (powered_update_hci(hdev) == 0)
4427 return 0;
4428
4429 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4430 &match);
4431 goto new_settings;
4432 }
4433
4434 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4435 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4436
4437 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4438 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4439 zero_cod, sizeof(zero_cod), NULL);
4440
4441 new_settings:
4442 err = new_settings(hdev, match.sk);
4443
4444 if (match.sk)
4445 sock_put(match.sk);
4446
4447 return err;
4448 }
4449
4450 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4451 {
4452 struct pending_cmd *cmd;
4453 u8 status;
4454
4455 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4456 if (!cmd)
4457 return;
4458
4459 if (err == -ERFKILL)
4460 status = MGMT_STATUS_RFKILLED;
4461 else
4462 status = MGMT_STATUS_FAILED;
4463
4464 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4465
4466 mgmt_pending_remove(cmd);
4467 }
4468
4469 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4470 {
4471 struct hci_request req;
4472
4473 hci_dev_lock(hdev);
4474
4475 /* When the discoverable timeout triggers, just make sure
4476 * the limited discoverable flag is cleared. Even if the
4477 * timeout was triggered from general discoverable mode, it
4478 * is safe to unconditionally clear the flag.
4479 */
4480 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4481 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4482
4483 hci_req_init(&req, hdev);
4484 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4485 u8 scan = SCAN_PAGE;
4486 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4487 sizeof(scan), &scan);
4488 }
4489 update_class(&req);
4490 update_adv_data(&req);
4491 hci_req_run(&req, NULL);
4492
4493 hdev->discov_timeout = 0;
4494
4495 new_settings(hdev, NULL);
4496
4497 hci_dev_unlock(hdev);
4498 }
4499
4500 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4501 {
4502 bool changed;
4503
4504 /* Nothing needed here if there's a pending command, since that
4505 * command's request completion callback takes care of everything
4506 * necessary.
4507 */
4508 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4509 return;
4510
4511 if (discoverable) {
4512 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4513 } else {
4514 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4515 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4516 }
4517
4518 if (changed) {
4519 struct hci_request req;
4520
4521 /* If this change in discoverable mode was triggered by
4522 * disabling connectable mode, the advertising flags may
4523 * need to be updated.
4524 */
4525 hci_req_init(&req, hdev);
4526 update_adv_data(&req);
4527 hci_req_run(&req, NULL);
4528
4529 new_settings(hdev, NULL);
4530 }
4531 }
4532
4533 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4534 {
4535 bool changed;
4536
4537 /* Nothing needed here if there's a pending command, since that
4538 * command's request completion callback takes care of everything
4539 * necessary.
4540 */
4541 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4542 return;
4543
4544 if (connectable)
4545 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4546 else
4547 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4548
4549 if (changed)
4550 new_settings(hdev, NULL);
4551 }
4552
4553 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4554 {
4555 u8 mgmt_err = mgmt_status(status);
4556
4557 if (scan & SCAN_PAGE)
4558 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4559 cmd_status_rsp, &mgmt_err);
4560
4561 if (scan & SCAN_INQUIRY)
4562 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4563 cmd_status_rsp, &mgmt_err);
4564 }
4565
4566 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4567 bool persistent)
4568 {
4569 struct mgmt_ev_new_link_key ev;
4570
4571 memset(&ev, 0, sizeof(ev));
4572
4573 ev.store_hint = persistent;
4574 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4575 ev.key.addr.type = BDADDR_BREDR;
4576 ev.key.type = key->type;
4577 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4578 ev.key.pin_len = key->pin_len;
4579
4580 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4581 }
4582
4583 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4584 {
4585 struct mgmt_ev_new_long_term_key ev;
4586
4587 memset(&ev, 0, sizeof(ev));
4588
4589 ev.store_hint = persistent;
4590 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4591 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4592 ev.key.authenticated = key->authenticated;
4593 ev.key.enc_size = key->enc_size;
4594 ev.key.ediv = key->ediv;
4595
4596 if (key->type == HCI_SMP_LTK)
4597 ev.key.master = 1;
4598
4599 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4600 memcpy(ev.key.val, key->val, sizeof(key->val));
4601
4602 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4603 }
4604
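/* Append one EIR field (length, type, data) to the buffer and return
 * the new total EIR length. The caller must ensure the buffer is
 * large enough.
 */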
4605 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4606 u8 data_len)
4607 {
4608 eir[eir_len++] = sizeof(type) + data_len;
4609 eir[eir_len++] = type;
4610 memcpy(&eir[eir_len], data, data_len);
4611 eir_len += data_len;
4612
4613 return eir_len;
4614 }
4615
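/* Send a Device Connected event, carrying the remote name and class
 * of device (when known) in the variable-length EIR portion.
 */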
4616 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4617 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4618 u8 *dev_class)
4619 {
4620 char buf[512];
4621 struct mgmt_ev_device_connected *ev = (void *) buf;
4622 u16 eir_len = 0;
4623
4624 bacpy(&ev->addr.bdaddr, bdaddr);
4625 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4626
4627 ev->flags = __cpu_to_le32(flags);
4628
4629 if (name_len > 0)
4630 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4631 name, name_len);
4632
4633 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4634 eir_len = eir_append_data(ev->eir, eir_len,
4635 EIR_CLASS_OF_DEV, dev_class, 3);
4636
4637 ev->eir_len = cpu_to_le16(eir_len);
4638
4639 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4640 sizeof(*ev) + eir_len, NULL);
4641 }
4642
4643 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4644 {
4645 struct mgmt_cp_disconnect *cp = cmd->param;
4646 struct sock **sk = data;
4647 struct mgmt_rp_disconnect rp;
4648
4649 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4650 rp.addr.type = cp->addr.type;
4651
4652 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4653 sizeof(rp));
4654
4655 *sk = cmd->sk;
4656 sock_hold(*sk);
4657
4658 mgmt_pending_remove(cmd);
4659 }
4660
4661 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4662 {
4663 struct hci_dev *hdev = data;
4664 struct mgmt_cp_unpair_device *cp = cmd->param;
4665 struct mgmt_rp_unpair_device rp;
4666
4667 memset(&rp, 0, sizeof(rp));
4668 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4669 rp.addr.type = cp->addr.type;
4670
4671 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4672
4673 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4674
4675 mgmt_pending_remove(cmd);
4676 }
4677
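/* Complete pending Disconnect and Unpair Device commands for this
 * controller and send a Device Disconnected event for ACL and LE
 * links.
 */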
4678 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4679 u8 link_type, u8 addr_type, u8 reason)
4680 {
4681 struct mgmt_ev_device_disconnected ev;
4682 struct sock *sk = NULL;
4683
4684 if (link_type != ACL_LINK && link_type != LE_LINK)
4685 return;
4686
4687 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4688
4689 bacpy(&ev.addr.bdaddr, bdaddr);
4690 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4691 ev.reason = reason;
4692
4693 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4694
4695 if (sk)
4696 sock_put(sk);
4697
4698 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4699 hdev);
4700 }
4701
4702 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4703 u8 link_type, u8 addr_type, u8 status)
4704 {
4705 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
4706 struct mgmt_cp_disconnect *cp;
4707 struct mgmt_rp_disconnect rp;
4708 struct pending_cmd *cmd;
4709
4710 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4711 hdev);
4712
4713 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4714 if (!cmd)
4715 return;
4716
4717 cp = cmd->param;
4718
4719 if (bacmp(bdaddr, &cp->addr.bdaddr))
4720 return;
4721
4722 if (cp->addr.type != bdaddr_type)
4723 return;
4724
4725 bacpy(&rp.addr.bdaddr, bdaddr);
4726 rp.addr.type = bdaddr_type;
4727
4728 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4729 mgmt_status(status), &rp, sizeof(rp));
4730
4731 mgmt_pending_remove(cmd);
4732 }
4733
4734 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4735 u8 addr_type, u8 status)
4736 {
4737 struct mgmt_ev_connect_failed ev;
4738
4739 bacpy(&ev.addr.bdaddr, bdaddr);
4740 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4741 ev.status = mgmt_status(status);
4742
4743 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4744 }
4745
4746 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4747 {
4748 struct mgmt_ev_pin_code_request ev;
4749
4750 bacpy(&ev.addr.bdaddr, bdaddr);
4751 ev.addr.type = BDADDR_BREDR;
4752 ev.secure = secure;
4753
4754 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4755 }
4756
4757 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4758 u8 status)
4759 {
4760 struct pending_cmd *cmd;
4761 struct mgmt_rp_pin_code_reply rp;
4762
4763 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4764 if (!cmd)
4765 return;
4766
4767 bacpy(&rp.addr.bdaddr, bdaddr);
4768 rp.addr.type = BDADDR_BREDR;
4769
4770 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4771 mgmt_status(status), &rp, sizeof(rp));
4772
4773 mgmt_pending_remove(cmd);
4774 }
4775
4776 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4777 u8 status)
4778 {
4779 struct pending_cmd *cmd;
4780 struct mgmt_rp_pin_code_reply rp;
4781
4782 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4783 if (!cmd)
4784 return;
4785
4786 bacpy(&rp.addr.bdaddr, bdaddr);
4787 rp.addr.type = BDADDR_BREDR;
4788
4789 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4790 mgmt_status(status), &rp, sizeof(rp));
4791
4792 mgmt_pending_remove(cmd);
4793 }
4794
4795 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4796 u8 link_type, u8 addr_type, __le32 value,
4797 u8 confirm_hint)
4798 {
4799 struct mgmt_ev_user_confirm_request ev;
4800
4801 BT_DBG("%s", hdev->name);
4802
4803 bacpy(&ev.addr.bdaddr, bdaddr);
4804 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4805 ev.confirm_hint = confirm_hint;
4806 ev.value = value;
4807
4808 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4809 NULL);
4810 }
4811
4812 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4813 u8 link_type, u8 addr_type)
4814 {
4815 struct mgmt_ev_user_passkey_request ev;
4816
4817 BT_DBG("%s", hdev->name);
4818
4819 bacpy(&ev.addr.bdaddr, bdaddr);
4820 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4821
4822 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4823 NULL);
4824 }
4825
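/* Common completion helper for the user confirm and passkey reply
 * commands: find the pending command for the given opcode, answer it
 * with the remote address and status, and remove it.
 */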
4826 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4827 u8 link_type, u8 addr_type, u8 status,
4828 u8 opcode)
4829 {
4830 struct pending_cmd *cmd;
4831 struct mgmt_rp_user_confirm_reply rp;
4832 int err;
4833
4834 cmd = mgmt_pending_find(opcode, hdev);
4835 if (!cmd)
4836 return -ENOENT;
4837
4838 bacpy(&rp.addr.bdaddr, bdaddr);
4839 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4840 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4841 &rp, sizeof(rp));
4842
4843 mgmt_pending_remove(cmd);
4844
4845 return err;
4846 }
4847
4848 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4849 u8 link_type, u8 addr_type, u8 status)
4850 {
4851 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4852 status, MGMT_OP_USER_CONFIRM_REPLY);
4853 }
4854
4855 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4856 u8 link_type, u8 addr_type, u8 status)
4857 {
4858 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4859 status,
4860 MGMT_OP_USER_CONFIRM_NEG_REPLY);
4861 }
4862
4863 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4864 u8 link_type, u8 addr_type, u8 status)
4865 {
4866 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4867 status, MGMT_OP_USER_PASSKEY_REPLY);
4868 }
4869
4870 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4871 u8 link_type, u8 addr_type, u8 status)
4872 {
4873 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4874 status,
4875 MGMT_OP_USER_PASSKEY_NEG_REPLY);
4876 }
4877
4878 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4879 u8 link_type, u8 addr_type, u32 passkey,
4880 u8 entered)
4881 {
4882 struct mgmt_ev_passkey_notify ev;
4883
4884 BT_DBG("%s", hdev->name);
4885
4886 bacpy(&ev.addr.bdaddr, bdaddr);
4887 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4888 ev.passkey = __cpu_to_le32(passkey);
4889 ev.entered = entered;
4890
4891 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4892 }
4893
4894 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4895 u8 addr_type, u8 status)
4896 {
4897 struct mgmt_ev_auth_failed ev;
4898
4899 bacpy(&ev.addr.bdaddr, bdaddr);
4900 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4901 ev.status = mgmt_status(status);
4902
4903 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4904 }
4905
4906 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4907 {
4908 struct cmd_lookup match = { NULL, hdev };
4909 bool changed;
4910
4911 if (status) {
4912 u8 mgmt_err = mgmt_status(status);
4913 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4914 cmd_status_rsp, &mgmt_err);
4915 return;
4916 }
4917
4918 if (test_bit(HCI_AUTH, &hdev->flags))
4919 changed = !test_and_set_bit(HCI_LINK_SECURITY,
4920 &hdev->dev_flags);
4921 else
4922 changed = test_and_clear_bit(HCI_LINK_SECURITY,
4923 &hdev->dev_flags);
4924
4925 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4926 &match);
4927
4928 if (changed)
4929 new_settings(hdev, match.sk);
4930
4931 if (match.sk)
4932 sock_put(match.sk);
4933 }
4934
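/* Clear the cached extended inquiry response data and queue a
 * Write EIR command with an all-zero payload, provided the
 * controller supports extended inquiry.
 */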
4935 static void clear_eir(struct hci_request *req)
4936 {
4937 struct hci_dev *hdev = req->hdev;
4938 struct hci_cp_write_eir cp;
4939
4940 if (!lmp_ext_inq_capable(hdev))
4941 return;
4942
4943 memset(hdev->eir, 0, sizeof(hdev->eir));
4944
4945 memset(&cp, 0, sizeof(cp));
4946
4947 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4948 }
4949
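/* Handle completion of an SSP mode change: update the SSP and HS
 * flags, answer pending Set SSP commands, emit new settings when the
 * state changed and refresh or clear the EIR data accordingly.
 */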
4950 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4951 {
4952 struct cmd_lookup match = { NULL, hdev };
4953 struct hci_request req;
4954 bool changed = false;
4955
4956 if (status) {
4957 u8 mgmt_err = mgmt_status(status);
4958
4959 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4960 &hdev->dev_flags)) {
4961 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4962 new_settings(hdev, NULL);
4963 }
4964
4965 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4966 &mgmt_err);
4967 return;
4968 }
4969
4970 if (enable) {
4971 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4972 } else {
4973 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4974 if (!changed)
4975 changed = test_and_clear_bit(HCI_HS_ENABLED,
4976 &hdev->dev_flags);
4977 else
4978 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4979 }
4980
4981 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4982
4983 if (changed)
4984 new_settings(hdev, match.sk);
4985
4986 if (match.sk)
4987 sock_put(match.sk);
4988
4989 hci_req_init(&req, hdev);
4990
4991 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4992 update_eir(&req);
4993 else
4994 clear_eir(&req);
4995
4996 hci_req_run(&req, NULL);
4997 }
4998
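/* Handle completion of a Secure Connections mode change: update the
 * SC flag, answer pending Set Secure Connections commands and emit
 * new settings when the state changed.
 */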
4999 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5000 {
5001 struct cmd_lookup match = { NULL, hdev };
5002 bool changed = false;
5003
5004 if (status) {
5005 u8 mgmt_err = mgmt_status(status);
5006
5007 if (enable && test_and_clear_bit(HCI_SC_ENABLED,
5008 &hdev->dev_flags))
5009 new_settings(hdev, NULL);
5010
5011 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5012 cmd_status_rsp, &mgmt_err);
5013 return;
5014 }
5015
5016 if (enable)
5017 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5018 else
5019 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5020
5021 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5022 settings_rsp, &match);
5023
5024 if (changed)
5025 new_settings(hdev, match.sk);
5026
5027 if (match.sk)
5028 sock_put(match.sk);
5029 }
5030
5031 static void sk_lookup(struct pending_cmd *cmd, void *data)
5032 {
5033 struct cmd_lookup *match = data;
5034
5035 if (match->sk == NULL) {
5036 match->sk = cmd->sk;
5037 sock_hold(match->sk);
5038 }
5039 }
5040
5041 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5042 u8 status)
5043 {
5044 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5045
5046 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5047 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5048 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5049
5050 if (!status)
5051 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5052 NULL);
5053
5054 if (match.sk)
5055 sock_put(match.sk);
5056 }
5057
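/* Propagate a completed local name change. When the change was not
 * requested over mgmt, cache the new name; the event is suppressed
 * entirely if the change is part of powering on the controller.
 */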
5058 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5059 {
5060 struct mgmt_cp_set_local_name ev;
5061 struct pending_cmd *cmd;
5062
5063 if (status)
5064 return;
5065
5066 memset(&ev, 0, sizeof(ev));
5067 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5068 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5069
5070 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5071 if (!cmd) {
5072 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5073
5074 /* If this is an HCI command related to powering on the
5075 * HCI dev, don't send any mgmt signals.
5076 */
5077 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5078 return;
5079 }
5080
5081 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5082 cmd ? cmd->sk : NULL);
5083 }
5084
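/* Complete a pending Read Local OOB Data command. With Secure
 * Connections enabled and 256-bit values available, reply with the
 * extended response carrying both the 192-bit and 256-bit hash and
 * randomizer; otherwise fall back to the legacy 192-bit response.
 */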
5085 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5086 u8 *randomizer192, u8 *hash256,
5087 u8 *randomizer256, u8 status)
5088 {
5089 struct pending_cmd *cmd;
5090
5091 BT_DBG("%s status %u", hdev->name, status);
5092
5093 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5094 if (!cmd)
5095 return;
5096
5097 if (status) {
5098 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5099 mgmt_status(status));
5100 } else {
5101 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5102 hash256 && randomizer256) {
5103 struct mgmt_rp_read_local_oob_ext_data rp;
5104
5105 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5106 memcpy(rp.randomizer192, randomizer192,
5107 sizeof(rp.randomizer192));
5108
5109 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5110 memcpy(rp.randomizer256, randomizer256,
5111 sizeof(rp.randomizer256));
5112
5113 cmd_complete(cmd->sk, hdev->id,
5114 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5115 &rp, sizeof(rp));
5116 } else {
5117 struct mgmt_rp_read_local_oob_data rp;
5118
5119 memcpy(rp.hash, hash192, sizeof(rp.hash));
5120 memcpy(rp.randomizer, randomizer192,
5121 sizeof(rp.randomizer));
5122
5123 cmd_complete(cmd->sk, hdev->id,
5124 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5125 &rp, sizeof(rp));
5126 }
5127 }
5128
5129 mgmt_pending_remove(cmd);
5130 }
5131
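/* Send a Device Found event for an active discovery result: set the
 * confirm-name and legacy-pairing flags as needed, copy the received
 * EIR data and append the class of device if it is not already
 * present.
 */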
5132 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5133 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5134 u8 ssp, u8 *eir, u16 eir_len)
5135 {
5136 char buf[512];
5137 struct mgmt_ev_device_found *ev = (void *) buf;
5138 size_t ev_size;
5139
5140 if (!hci_discovery_active(hdev))
5141 return;
5142
5143 /* Leave 5 bytes for a potential CoD field */
5144 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5145 return;
5146
5147 memset(buf, 0, sizeof(buf));
5148
5149 bacpy(&ev->addr.bdaddr, bdaddr);
5150 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5151 ev->rssi = rssi;
5152 if (cfm_name)
5153 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5154 if (!ssp)
5155 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5156
5157 if (eir_len > 0)
5158 memcpy(ev->eir, eir, eir_len);
5159
5160 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5161 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5162 dev_class, 3);
5163
5164 ev->eir_len = cpu_to_le16(eir_len);
5165 ev_size = sizeof(*ev) + eir_len;
5166
5167 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5168 }
5169
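/* Report a resolved remote name as a Device Found event with the
 * complete name carried in the EIR data.
 */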
5170 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5171 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5172 {
5173 struct mgmt_ev_device_found *ev;
5174 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5175 u16 eir_len;
5176
5177 ev = (struct mgmt_ev_device_found *) buf;
5178
5179 memset(buf, 0, sizeof(buf));
5180
5181 bacpy(&ev->addr.bdaddr, bdaddr);
5182 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5183 ev->rssi = rssi;
5184
5185 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5186 name_len);
5187
5188 ev->eir_len = cpu_to_le16(eir_len);
5189
5190 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5191 }
5192
5193 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5194 {
5195 struct mgmt_ev_discovering ev;
5196 struct pending_cmd *cmd;
5197
5198 BT_DBG("%s discovering %u", hdev->name, discovering);
5199
5200 if (discovering)
5201 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5202 else
5203 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5204
5205 if (cmd != NULL) {
5206 u8 type = hdev->discovery.type;
5207
5208 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5209 sizeof(type));
5210 mgmt_pending_remove(cmd);
5211 }
5212
5213 memset(&ev, 0, sizeof(ev));
5214 ev.type = hdev->discovery.type;
5215 ev.discovering = discovering;
5216
5217 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5218 }
5219
5220 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5221 {
5222 struct pending_cmd *cmd;
5223 struct mgmt_ev_device_blocked ev;
5224
5225 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5226
5227 bacpy(&ev.addr.bdaddr, bdaddr);
5228 ev.addr.type = type;
5229
5230 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5231 cmd ? cmd->sk : NULL);
5232 }
5233
5234 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5235 {
5236 struct pending_cmd *cmd;
5237 struct mgmt_ev_device_unblocked ev;
5238
5239 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5240
5241 bacpy(&ev.addr.bdaddr, bdaddr);
5242 ev.addr.type = type;
5243
5244 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5245 cmd ? cmd->sk : NULL);
5246 }
5247
5248 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5249 {
5250 BT_DBG("%s status %u", hdev->name, status);
5251
5252 /* Clear the advertising mgmt setting if we failed to re-enable it */
5253 if (status) {
5254 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5255 new_settings(hdev, NULL);
5256 }
5257 }
5258
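/* Re-enable advertising once the last LE connection is gone, provided
 * the advertising setting is still enabled. If the request cannot be
 * run or fails, the advertising flag is cleared and user space is
 * notified of the new settings.
 */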
5259 void mgmt_reenable_advertising(struct hci_dev *hdev)
5260 {
5261 struct hci_request req;
5262
5263 if (hci_conn_num(hdev, LE_LINK) > 0)
5264 return;
5265
5266 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5267 return;
5268
5269 hci_req_init(&req, hdev);
5270 enable_advertising(&req);
5271
5272 /* If this fails, we have no option but to let user space
5273 * know that we've disabled advertising.
5274 */
5275 if (hci_req_run(&req, adv_enable_complete) < 0) {
5276 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5277 new_settings(hdev, NULL);
5278 }
5279 }