]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Remove Simultaneous LE & BR/EDR flags from AD
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Management opcodes implemented here; reported verbatim to user space
 * by the Read Management Commands reply (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
};
84
/* Management events this implementation may emit; reported verbatim to
 * user space by the Read Management Commands reply (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};
108
/* Two-second timeout, expressed in jiffies (presumably the service
 * cache validity period -- usage is outside this chunk, confirm).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller counts as powered only when HCI_UP is set and it is not
 * in the transient HCI_AUTO_OFF state.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
113
/* Book-keeping for a mgmt command whose reply is deferred until the
 * matching HCI request completes.
 */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode this entry will answer */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'd copy of the command parameters */
	struct sock *sk;	/* issuing socket; reference held via sock_hold() */
	void *user_data;	/* opaque per-command state, set by callers */
};
122
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code.  Codes beyond the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
187
188 static u8 mgmt_status(u8 hci_status)
189 {
190 if (hci_status < ARRAY_SIZE(mgmt_status_table))
191 return mgmt_status_table[hci_status];
192
193 return MGMT_STATUS_FAILED;
194 }
195
/* Queue a Command Status event (opcode @cmd, status @status) onto the
 * mgmt socket @sk.  Returns 0 on success, -ENOMEM if the skb cannot be
 * allocated, or the sock_queue_rcv_skb() error.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		/* skb not consumed on failure, so release it here */
		kfree_skb(skb);

	return err;
}
225
/* Queue a Command Complete event for @cmd onto the mgmt socket @sk,
 * appending @rp_len bytes of return parameters from @rp (which may be
 * NULL when @rp_len is 0).  Returns 0 on success, -ENOMEM on allocation
 * failure, or the sock_queue_rcv_skb() error.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		/* skb not consumed on failure, so release it here */
		kfree_skb(skb);

	return err;
}
259
260 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
261 u16 data_len)
262 {
263 struct mgmt_rp_read_version rp;
264
265 BT_DBG("sock %p", sk);
266
267 rp.version = MGMT_VERSION;
268 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269
270 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
271 sizeof(rp));
272 }
273
/* MGMT_OP_READ_COMMANDS handler: reply with the supported command and
 * event opcode lists.  The reply body is the counts followed by
 * num_commands + num_events little-endian u16 opcodes.  Returns 0 on
 * success or a negative errno.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	/* Opcodes can land on odd offsets, hence put_unaligned_le16() */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	/* Event list continues right after the command list */
	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
307
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all BR/EDR
 * controllers, excluding those still in setup or bound to a user
 * channel.  Returns 0 on success or a negative errno.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation (no skip filters) */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in ids, skipping devices that are not ready
	 * for mgmt use; the final count may be smaller than above.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length for the ids actually written */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
360
361 static u32 get_supported_settings(struct hci_dev *hdev)
362 {
363 u32 settings = 0;
364
365 settings |= MGMT_SETTING_POWERED;
366 settings |= MGMT_SETTING_PAIRABLE;
367
368 if (lmp_bredr_capable(hdev)) {
369 settings |= MGMT_SETTING_CONNECTABLE;
370 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
371 settings |= MGMT_SETTING_FAST_CONNECTABLE;
372 settings |= MGMT_SETTING_DISCOVERABLE;
373 settings |= MGMT_SETTING_BREDR;
374 settings |= MGMT_SETTING_LINK_SECURITY;
375
376 if (lmp_ssp_capable(hdev)) {
377 settings |= MGMT_SETTING_SSP;
378 settings |= MGMT_SETTING_HS;
379 }
380
381 if (lmp_sc_capable(hdev) ||
382 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
383 settings |= MGMT_SETTING_SECURE_CONN;
384 }
385
386 if (lmp_le_capable(hdev)) {
387 settings |= MGMT_SETTING_LE;
388 settings |= MGMT_SETTING_ADVERTISING;
389 }
390
391 return settings;
392 }
393
394 static u32 get_current_settings(struct hci_dev *hdev)
395 {
396 u32 settings = 0;
397
398 if (hdev_is_powered(hdev))
399 settings |= MGMT_SETTING_POWERED;
400
401 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
402 settings |= MGMT_SETTING_CONNECTABLE;
403
404 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
405 settings |= MGMT_SETTING_FAST_CONNECTABLE;
406
407 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_DISCOVERABLE;
409
410 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_PAIRABLE;
412
413 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
414 settings |= MGMT_SETTING_BREDR;
415
416 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
417 settings |= MGMT_SETTING_LE;
418
419 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
420 settings |= MGMT_SETTING_LINK_SECURITY;
421
422 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_SSP;
424
425 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_HS;
427
428 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
429 settings |= MGMT_SETTING_ADVERTISING;
430
431 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_SECURE_CONN;
433
434 return settings;
435 }
436
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field of 16-bit service UUIDs to @data, using at most
 * @len bytes.  UUIDs below 0x1100 and the PnP Information service class
 * are skipped.  If not all UUIDs fit, the field type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME.  Returns the advanced write
 * pointer (unchanged if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit value sits at bytes 12-13 of the stored UUID */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length covers the type byte */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
480
/* Append an EIR field of 32-bit service UUIDs to @data, using at most
 * @len bytes.  If not all UUIDs fit, the field type is downgraded from
 * EIR_UUID32_ALL to EIR_UUID32_SOME.  Returns the advanced write
 * pointer (unchanged if nothing was written).
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length covers the type byte */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit value sits at bytes 12-15 of the stored UUID */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
513
/* Append an EIR field of 128-bit service UUIDs to @data, using at most
 * @len bytes.  If not all UUIDs fit, the field type is downgraded from
 * EIR_UUID128_ALL to EIR_UUID128_SOME.  Returns the advanced write
 * pointer (unchanged if nothing was written).
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length covers the type byte */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
546
547 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
548 {
549 struct pending_cmd *cmd;
550
551 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
552 if (cmd->opcode == opcode)
553 return cmd;
554 }
555
556 return NULL;
557 }
558
/* Build LE scan response data into @ptr, currently consisting only of
 * the local name (shortened with EIR_NAME_SHORT if it does not fit).
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
584
/* Queue an HCI LE Set Scan Response Data command on @req, but only if
 * LE is enabled and the data actually differs from what the controller
 * already has cached in hdev->scan_rsp_data.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
609
610 static u8 get_adv_discov_flags(struct hci_dev *hdev)
611 {
612 struct pending_cmd *cmd;
613
614 /* If there's a pending mgmt command the flags will not yet have
615 * their final values, so check for this first.
616 */
617 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
618 if (cmd) {
619 struct mgmt_mode *cp = cmd->param;
620 if (cp->val == 0x01)
621 return LE_AD_GENERAL;
622 else if (cp->val == 0x02)
623 return LE_AD_LIMITED;
624 } else {
625 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
626 return LE_AD_LIMITED;
627 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
628 return LE_AD_GENERAL;
629 }
630
631 return 0;
632 }
633
/* Build LE advertising data into @ptr: an EIR_FLAGS field (if any flag
 * is set) followed by EIR_TX_POWER (if a valid advertising TX power is
 * known).  Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
665
/* Queue an HCI LE Set Advertising Data command on @req, but only if LE
 * is enabled and the data actually differs from what the controller
 * already has cached in hdev->adv_data.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
690
/* Build extended inquiry response data into @data: local name (capped
 * at 48 bytes), inquiry TX power, device ID, and the 16/32/128-bit
 * service UUID lists (each bounded by the remaining EIR space).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID field: 9 = type byte + four 16-bit values */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper bounds itself by the space left in the buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
738
/* Queue an HCI Write Extended Inquiry Response command on @req.  Skips
 * silently unless the device is powered, supports extended inquiry, has
 * SSP enabled, is not in the service-cache window, and the EIR content
 * actually changed relative to hdev->eir.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
767
768 static u8 get_service_classes(struct hci_dev *hdev)
769 {
770 struct bt_uuid *uuid;
771 u8 val = 0;
772
773 list_for_each_entry(uuid, &hdev->uuids, list)
774 val |= uuid->svc_hint;
775
776 return val;
777 }
778
/* Queue an HCI Write Class of Device command on @req, built from the
 * minor/major class and the service-class hints.  Skips silently when
 * the device is not powered, BR/EDR is disabled, the service cache is
 * active, or the class is unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Reflect limited discoverable mode in the class bits */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI command if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
807
/* Delayed work handler: when the service-cache window expires, push the
 * accumulated EIR and class-of-device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache bit was already cleared */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
828
/* One-time per-controller mgmt initialisation, performed the first time
 * a mgmt socket touches @hdev (guarded by the HCI_MGMT flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
843
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and names.  Returns 0 on success or a negative errno.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
873
/* Drop the socket reference taken in mgmt_pending_add() and release the
 * parameter copy and the tracking struct.  The caller must already have
 * unlinked @cmd from hdev->mgmt_pending (see mgmt_pending_remove()).
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
880
881 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
882 struct hci_dev *hdev, void *data,
883 u16 len)
884 {
885 struct pending_cmd *cmd;
886
887 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
888 if (!cmd)
889 return NULL;
890
891 cmd->opcode = opcode;
892 cmd->index = hdev->id;
893
894 cmd->param = kmalloc(len, GFP_KERNEL);
895 if (!cmd->param) {
896 kfree(cmd);
897 return NULL;
898 }
899
900 if (data)
901 memcpy(cmd->param, data, len);
902
903 cmd->sk = sk;
904 sock_hold(sk);
905
906 list_add(&cmd->list, &hdev->mgmt_pending);
907
908 return cmd;
909 }
910
911 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
912 void (*cb)(struct pending_cmd *cmd,
913 void *data),
914 void *data)
915 {
916 struct pending_cmd *cmd, *tmp;
917
918 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
919 if (opcode > 0 && cmd->opcode != opcode)
920 continue;
921
922 cb(cmd, data);
923 }
924 }
925
/* Unlink @cmd from its pending list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
931
/* Reply to @opcode with a Command Complete carrying the current
 * settings bitmask (little-endian).  Returns cmd_complete()'s result.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
939
/* MGMT_OP_SET_POWERED handler: power the controller up or down.  The
 * reply is deferred via a pending command that completes when the power
 * state change finishes.  Returns 0 or a negative errno.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device already up but auto-power-off pending: cancel the
	 * pending power-off and, for a power-on request, report the
	 * (already powered) state through mgmt_powered().
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already in effect: just echo the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
994
/* Broadcast a mgmt event to all control sockets except @skip_sk.  A
 * NULL @hdev addresses the event to MGMT_INDEX_NONE.  Returns 0 on
 * success or -ENOMEM.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1024
1025 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1026 {
1027 __le32 ev;
1028
1029 ev = cpu_to_le32(get_current_settings(hdev));
1030
1031 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1032 }
1033
/* Shared context for mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first matching socket (held by settings_rsp) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1039
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, remember the first socket seen in the cmd_lookup match (an
 * extra reference is taken), and free the pending entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1055
/* mgmt_pending_foreach() callback: fail @cmd with the mgmt status
 * pointed to by @data and remove the pending entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1063
1064 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1065 {
1066 if (!lmp_bredr_capable(hdev))
1067 return MGMT_STATUS_NOT_SUPPORTED;
1068 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1069 return MGMT_STATUS_REJECTED;
1070 else
1071 return MGMT_STATUS_SUCCESS;
1072 }
1073
1074 static u8 mgmt_le_support(struct hci_dev *hdev)
1075 {
1076 if (!lmp_le_capable(hdev))
1077 return MGMT_STATUS_NOT_SUPPORTED;
1078 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1079 return MGMT_STATUS_REJECTED;
1080 else
1081 return MGMT_STATUS_SUCCESS;
1082 }
1083
/* HCI request completion handler for Set Discoverable: update the
 * HCI_DISCOVERABLE flag, answer the pending mgmt command, broadcast New
 * Settings if anything changed, (re)arm the discoverable timeout and
 * refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable bit set optimistically
		 * by the command handler.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the discoverable timeout, if one was requested */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1140
/* MGMT Set Discoverable command handler.
 *
 * cp->val: 0x00 = disable, 0x01 = general discoverable,
 * 0x02 = limited discoverable. Disabling requires a zero timeout and
 * limited discoverable requires a non-zero timeout.
 *
 * Returns 0 or a negative errno; mgmt-level failures are reported to
 * the requesting socket via cmd_status()/send_settings_rsp().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable requires at least one enabled transport */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs the discov_off timer, which needs power */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against other scan-mode changing commands */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only accepted while connectable is set */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only toggle the stored flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode: program both the
			 * LIAC and the GIAC, capped at the number of
			 * IACs the controller supports.
			 */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode: GIAC only */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1305
1306 static void write_fast_connectable(struct hci_request *req, bool enable)
1307 {
1308 struct hci_dev *hdev = req->hdev;
1309 struct hci_cp_write_page_scan_activity acp;
1310 u8 type;
1311
1312 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1313 return;
1314
1315 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1316 return;
1317
1318 if (enable) {
1319 type = PAGE_SCAN_TYPE_INTERLACED;
1320
1321 /* 160 msec page scan interval */
1322 acp.interval = __constant_cpu_to_le16(0x0100);
1323 } else {
1324 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1325
1326 /* default 1.28 sec page scan */
1327 acp.interval = __constant_cpu_to_le16(0x0800);
1328 }
1329
1330 acp.window = __constant_cpu_to_le16(0x0012);
1331
1332 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1333 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1334 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1335 sizeof(acp), &acp);
1336
1337 if (hdev->page_scan_type != type)
1338 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1339 }
1340
1341 static u8 get_adv_type(struct hci_dev *hdev)
1342 {
1343 struct pending_cmd *cmd;
1344 bool connectable;
1345
1346 /* If there's a pending mgmt command the flag will not yet have
1347 * it's final value, so check for this first.
1348 */
1349 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1350 if (cmd) {
1351 struct mgmt_mode *cp = cmd->param;
1352 connectable = !!cp->val;
1353 } else {
1354 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1355 }
1356
1357 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1358 }
1359
1360 static void enable_advertising(struct hci_request *req)
1361 {
1362 struct hci_dev *hdev = req->hdev;
1363 struct hci_cp_le_set_adv_param cp;
1364 u8 enable = 0x01;
1365
1366 memset(&cp, 0, sizeof(cp));
1367 cp.min_interval = __constant_cpu_to_le16(0x0800);
1368 cp.max_interval = __constant_cpu_to_le16(0x0800);
1369 cp.type = get_adv_type(hdev);
1370 cp.own_address_type = hdev->own_addr_type;
1371 cp.channel_map = 0x07;
1372
1373 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1374
1375 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1376 }
1377
1378 static void disable_advertising(struct hci_request *req)
1379 {
1380 u8 enable = 0x00;
1381
1382 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1383 }
1384
/* Completion callback for the HCI request issued by set_connectable().
 * Syncs HCI_CONNECTABLE with the requested mode, responds to the
 * pending mgmt command and emits New Settings if anything changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* HCI failure: translate to a mgmt error for the caller */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1422
1423 static int set_connectable_update_settings(struct hci_dev *hdev,
1424 struct sock *sk, u8 val)
1425 {
1426 bool changed = false;
1427 int err;
1428
1429 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1430 changed = true;
1431
1432 if (val) {
1433 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1434 } else {
1435 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1436 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1437 }
1438
1439 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1440 if (err < 0)
1441 return err;
1442
1443 if (changed)
1444 return new_settings(hdev, sk);
1445
1446 return 0;
1447 }
1448
/* MGMT Set Connectable command handler.
 *
 * cp->val: 0x00 = not connectable, 0x01 = connectable. Depending on
 * the enabled transports this toggles BR/EDR page scan and/or adjusts
 * the LE advertising type. Returns 0 or a negative errno; mgmt-level
 * errors go back to the socket via cmd_status().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only update the stored settings */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against other scan-mode changing commands */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Dropping page scan ends any running
			 * discoverable period, so stop its timer.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Re-program advertising so its type (set by get_adv_type from
	 * the pending connectable value) matches the new setting, but
	 * only while no LE connection is up.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means the request ended up empty: nothing
		 * needed to change at the HCI level, so fall back to a
		 * pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1543
1544 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1545 u16 len)
1546 {
1547 struct mgmt_mode *cp = data;
1548 bool changed;
1549 int err;
1550
1551 BT_DBG("request for %s", hdev->name);
1552
1553 if (cp->val != 0x00 && cp->val != 0x01)
1554 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1555 MGMT_STATUS_INVALID_PARAMS);
1556
1557 hci_dev_lock(hdev);
1558
1559 if (cp->val)
1560 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1561 else
1562 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1563
1564 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1565 if (err < 0)
1566 goto unlock;
1567
1568 if (changed)
1569 err = new_settings(hdev, sk);
1570
1571 unlock:
1572 hci_dev_unlock(hdev);
1573 return err;
1574 }
1575
/* MGMT Set Link Security (legacy authentication) command handler.
 * Maps to the HCI Write Authentication Enable command when the device
 * is powered; otherwise only the stored setting is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the stored flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested mode: just confirm */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1645
/* MGMT Set Secure Simple Pairing command handler. Maps to the HCI
 * Write Simple Pairing Mode command when powered; disabling SSP also
 * clears High Speed, which depends on SSP being enabled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only update the stored flags */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS requires SSP, so clear it too; "changed"
			 * ends up true if either flag flipped.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Serialize against commands sharing the SSP/HS state */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested mode: just confirm the settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1723
/* MGMT Set High Speed command handler. HS is a host-side setting that
 * depends on SSP being enabled; disabling HS is only allowed while the
 * device is powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS cannot be enabled without SSP */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is rejected while the device is powered */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1774
/* Completion callback for the HCI request issued by set_le(). Responds
 * to all pending Set LE commands and emits New Settings; on success
 * with LE enabled it also refreshes advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1812
/* MGMT Set Low Energy command handler. Toggles LE host support via the
 * HCI Write LE Host Supported command. On LE-only controllers (BR/EDR
 * disabled) toggling is rejected.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If powered off, or the controller already matches the
	 * requested mode, only the stored flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also drops the advertising setting */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against commands that also touch LE state */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1901
1902 /* This is a helper function to test for pending mgmt commands that can
1903 * cause CoD or EIR HCI commands. We can only allow one such pending
1904 * mgmt command at a time since otherwise we cannot easily track what
1905 * the current values are, will be, and based on that calculate if a new
1906 * HCI command needs to be sent and if yes with what value.
1907 */
1908 static bool pending_eir_or_class(struct hci_dev *hdev)
1909 {
1910 struct pending_cmd *cmd;
1911
1912 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1913 switch (cmd->opcode) {
1914 case MGMT_OP_ADD_UUID:
1915 case MGMT_OP_REMOVE_UUID:
1916 case MGMT_OP_SET_DEV_CLASS:
1917 case MGMT_OP_SET_POWERED:
1918 return true;
1919 }
1920 }
1921
1922 return false;
1923 }
1924
/* Bluetooth Base UUID in little-endian byte order. Only the first 12
 * bytes are compared in get_uuid_size(); the remaining 4 bytes carry
 * the 16/32-bit short-form value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the short-form size (16, 32 or 128 bits) of a 128-bit UUID:
 * anything not derived from the Base UUID is a full 128-bit UUID.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	/* Short-form value sits in the top 4 bytes (little-endian) */
	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
1943
/* Shared completion helper for the UUID/class mgmt commands: responds
 * to the pending command identified by mgmt_op with the current Class
 * of Device and removes it from the pending list.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1962
/* HCI request completion callback for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1969
/* MGMT Add UUID command handler. Appends the UUID to hdev->uuids and
 * refreshes the Class of Device and EIR data to reflect it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands needed to be sent, so the
		 * operation can complete immediately.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Final response is sent from add_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2027
2028 static bool enable_service_cache(struct hci_dev *hdev)
2029 {
2030 if (!hdev_is_powered(hdev))
2031 return false;
2032
2033 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2034 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2035 CACHE_TIMEOUT);
2036 return true;
2037 }
2038
2039 return false;
2040 }
2041
/* HCI request completion callback for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2048
/* MGMT Remove UUID command handler. An all-zero UUID clears the whole
 * list; otherwise every entry matching the given UUID is deleted. The
 * Class of Device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The wildcard (all-zero) UUID wipes the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		/* If this call armed the service cache timer the HCI
		 * update is deferred, so complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands needed to be sent */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Final response is sent from remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2126
/* HCI request completion callback for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2133
/* MGMT Set Device Class command handler. Stores the new major/minor
 * class and, when powered, pushes the updated Class of Device (plus a
 * refreshed EIR if the service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values are enough */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* NOTE(review): the lock is dropped around the synchronous
	 * cancel, presumably because the service_cache work itself
	 * takes hci_dev_lock - confirm against its handler.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI commands needed to be sent */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Final response is sent from set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2204
2205 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2206 u16 len)
2207 {
2208 struct mgmt_cp_load_link_keys *cp = data;
2209 u16 key_count, expected_len;
2210 int i;
2211
2212 BT_DBG("request for %s", hdev->name);
2213
2214 if (!lmp_bredr_capable(hdev))
2215 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2216 MGMT_STATUS_NOT_SUPPORTED);
2217
2218 key_count = __le16_to_cpu(cp->key_count);
2219
2220 expected_len = sizeof(*cp) + key_count *
2221 sizeof(struct mgmt_link_key_info);
2222 if (expected_len != len) {
2223 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2224 len, expected_len);
2225 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2226 MGMT_STATUS_INVALID_PARAMS);
2227 }
2228
2229 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2230 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2231 MGMT_STATUS_INVALID_PARAMS);
2232
2233 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2234 key_count);
2235
2236 for (i = 0; i < key_count; i++) {
2237 struct mgmt_link_key_info *key = &cp->keys[i];
2238
2239 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2240 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2241 MGMT_STATUS_INVALID_PARAMS);
2242 }
2243
2244 hci_dev_lock(hdev);
2245
2246 hci_link_keys_clear(hdev);
2247
2248 if (cp->debug_keys)
2249 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2250 else
2251 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2252
2253 for (i = 0; i < key_count; i++) {
2254 struct mgmt_link_key_info *key = &cp->keys[i];
2255
2256 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2257 key->type, key->pin_len);
2258 }
2259
2260 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2261
2262 hci_dev_unlock(hdev);
2263
2264 return 0;
2265 }
2266
/* Send the Device Unpaired mgmt event for the given address, skipping
 * skip_sk (the socket that requested the unpair).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2278
2279 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2280 u16 len)
2281 {
2282 struct mgmt_cp_unpair_device *cp = data;
2283 struct mgmt_rp_unpair_device rp;
2284 struct hci_cp_disconnect dc;
2285 struct pending_cmd *cmd;
2286 struct hci_conn *conn;
2287 int err;
2288
2289 memset(&rp, 0, sizeof(rp));
2290 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2291 rp.addr.type = cp->addr.type;
2292
2293 if (!bdaddr_type_is_valid(cp->addr.type))
2294 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2295 MGMT_STATUS_INVALID_PARAMS,
2296 &rp, sizeof(rp));
2297
2298 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2299 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2300 MGMT_STATUS_INVALID_PARAMS,
2301 &rp, sizeof(rp));
2302
2303 hci_dev_lock(hdev);
2304
2305 if (!hdev_is_powered(hdev)) {
2306 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2307 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2308 goto unlock;
2309 }
2310
2311 if (cp->addr.type == BDADDR_BREDR)
2312 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2313 else
2314 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
2315
2316 if (err < 0) {
2317 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2318 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2319 goto unlock;
2320 }
2321
2322 if (cp->disconnect) {
2323 if (cp->addr.type == BDADDR_BREDR)
2324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2325 &cp->addr.bdaddr);
2326 else
2327 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2328 &cp->addr.bdaddr);
2329 } else {
2330 conn = NULL;
2331 }
2332
2333 if (!conn) {
2334 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2335 &rp, sizeof(rp));
2336 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2337 goto unlock;
2338 }
2339
2340 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2341 sizeof(*cp));
2342 if (!cmd) {
2343 err = -ENOMEM;
2344 goto unlock;
2345 }
2346
2347 dc.handle = cpu_to_le16(conn->handle);
2348 dc.reason = 0x13; /* Remote User Terminated Connection */
2349 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2350 if (err < 0)
2351 mgmt_pending_remove(cmd);
2352
2353 unlock:
2354 hci_dev_unlock(hdev);
2355 return err;
2356 }
2357
/* MGMT Disconnect command handler. Issues an HCI Disconnect with
 * reason "remote user terminated connection" for the given address.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to disconnect */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2422
2423 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2424 {
2425 switch (link_type) {
2426 case LE_LINK:
2427 switch (addr_type) {
2428 case ADDR_LE_DEV_PUBLIC:
2429 return BDADDR_LE_PUBLIC;
2430
2431 default:
2432 /* Fallback to LE Random address type */
2433 return BDADDR_LE_RANDOM;
2434 }
2435
2436 default:
2437 /* Fallback to BR/EDR type */
2438 return BDADDR_BREDR;
2439 }
2440 }
2441
2442 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2443 u16 data_len)
2444 {
2445 struct mgmt_rp_get_connections *rp;
2446 struct hci_conn *c;
2447 size_t rp_len;
2448 int err;
2449 u16 i;
2450
2451 BT_DBG("");
2452
2453 hci_dev_lock(hdev);
2454
2455 if (!hdev_is_powered(hdev)) {
2456 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2457 MGMT_STATUS_NOT_POWERED);
2458 goto unlock;
2459 }
2460
2461 i = 0;
2462 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2463 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2464 i++;
2465 }
2466
2467 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2468 rp = kmalloc(rp_len, GFP_KERNEL);
2469 if (!rp) {
2470 err = -ENOMEM;
2471 goto unlock;
2472 }
2473
2474 i = 0;
2475 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2476 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2477 continue;
2478 bacpy(&rp->addr[i].bdaddr, &c->dst);
2479 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2480 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2481 continue;
2482 i++;
2483 }
2484
2485 rp->conn_count = cpu_to_le16(i);
2486
2487 /* Recalculate length in case of filtered SCO connections, etc */
2488 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2489
2490 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2491 rp_len);
2492
2493 kfree(rp);
2494
2495 unlock:
2496 hci_dev_unlock(hdev);
2497 return err;
2498 }
2499
2500 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2501 struct mgmt_cp_pin_code_neg_reply *cp)
2502 {
2503 struct pending_cmd *cmd;
2504 int err;
2505
2506 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2507 sizeof(*cp));
2508 if (!cmd)
2509 return -ENOMEM;
2510
2511 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2512 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2513 if (err < 0)
2514 mgmt_pending_remove(cmd);
2515
2516 return err;
2517 }
2518
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing.
 *
 * A BT_SECURITY_HIGH pairing requires a full 16-byte PIN; anything
 * shorter is answered with a negative reply toward the controller
 * and MGMT_STATUS_INVALID_PARAMS toward user space.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only applies to BR/EDR (ACL) connections */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject toward the controller first, then report the
		 * parameter error to user space (unless the negative
		 * reply itself failed, in which case its error wins).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2578
2579 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2580 u16 len)
2581 {
2582 struct mgmt_cp_set_io_capability *cp = data;
2583
2584 BT_DBG("");
2585
2586 hci_dev_lock(hdev);
2587
2588 hdev->io_capability = cp->io_capability;
2589
2590 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2591 hdev->io_capability);
2592
2593 hci_dev_unlock(hdev);
2594
2595 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2596 0);
2597 }
2598
2599 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2600 {
2601 struct hci_dev *hdev = conn->hdev;
2602 struct pending_cmd *cmd;
2603
2604 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2605 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2606 continue;
2607
2608 if (cmd->user_data != conn)
2609 continue;
2610
2611 return cmd;
2612 }
2613
2614 return NULL;
2615 }
2616
/* Finish a pending PAIR_DEVICE command: report the final status to
 * user space, detach all pairing callbacks from the connection and
 * release the connection reference held for the pairing.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2637
2638 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2639 {
2640 struct pending_cmd *cmd;
2641
2642 BT_DBG("status %u", status);
2643
2644 cmd = find_pairing(conn);
2645 if (!cmd)
2646 BT_DBG("Unable to find a pending command");
2647 else
2648 pairing_complete(cmd, mgmt_status(status));
2649 }
2650
2651 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2652 {
2653 struct pending_cmd *cmd;
2654
2655 BT_DBG("status %u", status);
2656
2657 if (!status)
2658 return;
2659
2660 cmd = find_pairing(conn);
2661 if (!cmd)
2662 BT_DBG("Unable to find a pending command");
2663 else
2664 pairing_complete(cmd, mgmt_status(status));
2665 }
2666
/* MGMT_OP_PAIR_DEVICE handler: initiate a connection to the remote
 * address and hook pairing-completion callbacks onto it. The final
 * pairing result is delivered asynchronously via pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* An io_cap of 0x03 cannot support MITM protection, so plain
	 * dedicated bonding is requested; anything else asks for the
	 * MITM variant.
	 */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect_cfm_cb already in place means another pairing is
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2758
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, completing it with
 * MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pending pairing
	 * is connected to.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2800
/* Common helper for all user pairing responses (PIN, confirm and
 * passkey replies, positive and negative).
 *
 * For LE addresses the reply is routed through SMP and answered
 * immediately; for BR/EDR the HCI command @hci_op is sent and a
 * pending entry is queued until the controller confirms it.
 *
 * @passkey is only meaningful when @hci_op is
 * HCI_OP_USER_PASSKEY_REPLY; callers pass 0 otherwise.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2870
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
2882
/* MGMT_OP_USER_CONFIRM_REPLY handler: accept a user confirmation
 * request. The explicit length check guards against truncated
 * command parameters.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
2898
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: reject a user
 * confirmation request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
2910
/* MGMT_OP_USER_PASSKEY_REPLY handler: answer a passkey request with
 * the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
2922
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: reject a passkey
 * request.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
2934
/* Append an HCI Write Local Name command carrying the current
 * hdev->dev_name to the request @req.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
2944
/* Request-completion callback for set_local_name(): resolve the
 * pending SET_LOCAL_NAME command with the HCI status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending entry may already be gone (e.g. if the command
	 * was resolved elsewhere); nothing to do then.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2972
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name (and short
 * name) and, when powered, push it to the controller via the EIR
 * and scan-response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only the stored names are updated; the
	 * controller is programmed on next power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3041
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its
 * local out-of-band pairing data. The extended (Secure Connections)
 * variant is used when HCI_SC_ENABLED is set. The result is
 * delivered asynchronously via the pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3089
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing
 * data received for a remote device. Two parameter layouts are
 * accepted, distinguished by length: the legacy form (hash +
 * randomizer) and the extended Secure Connections form (192- and
 * 256-bit pairs).
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3137
3138 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3139 void *data, u16 len)
3140 {
3141 struct mgmt_cp_remove_remote_oob_data *cp = data;
3142 u8 status;
3143 int err;
3144
3145 BT_DBG("%s", hdev->name);
3146
3147 hci_dev_lock(hdev);
3148
3149 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3150 if (err < 0)
3151 status = MGMT_STATUS_INVALID_PARAMS;
3152 else
3153 status = MGMT_STATUS_SUCCESS;
3154
3155 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3156 status, &cp->addr, sizeof(cp->addr));
3157
3158 hci_dev_unlock(hdev);
3159 return err;
3160 }
3161
/* Fail the pending START_DISCOVERY command with the given HCI
 * status and reset the discovery state machine.
 *
 * Returns -ENOENT when no START_DISCOVERY command is pending.
 * Caller must hold the hdev lock.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3182
/* Request-completion callback for start_discovery(): on success
 * move to DISCOVERY_FINDING and, for LE-based discovery, arm the
 * timer that will later disable the LE scan.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}
3216
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or interleaved) by
 * building and running an HCI request. The final result is reported
 * through start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and normal discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start from a clean cache so old results don't leak
		 * into the new discovery session.
		 */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR to be enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning is rejected while advertising is active */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3352
/* Fail the pending STOP_DISCOVERY command with the given HCI
 * status. Returns -ENOENT when no such command is pending.
 * Caller must hold the hdev lock.
 */
static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}
3368
3369 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3370 {
3371 BT_DBG("status %d", status);
3372
3373 hci_dev_lock(hdev);
3374
3375 if (status) {
3376 mgmt_stop_discovery_failed(hdev, status);
3377 goto unlock;
3378 }
3379
3380 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3381
3382 unlock:
3383 hci_dev_unlock(hdev);
3384 }
3385
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery
 * session. Depending on the current discovery state this cancels
 * the inquiry, disables the LE scan, or cancels an outstanding
 * remote name request.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the one discovery was started
	 * with.
	 */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY distinguishes BR/EDR inquiry from an LE
		 * scan as the currently running activity.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request in flight: nothing to cancel, so the
		 * command can complete immediately.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3477
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether the
 * name of a discovered device is already known. Unknown names are
 * queued for resolution; known names are dropped from the
 * name-resolve list.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3517
3518 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3519 u16 len)
3520 {
3521 struct mgmt_cp_block_device *cp = data;
3522 u8 status;
3523 int err;
3524
3525 BT_DBG("%s", hdev->name);
3526
3527 if (!bdaddr_type_is_valid(cp->addr.type))
3528 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3529 MGMT_STATUS_INVALID_PARAMS,
3530 &cp->addr, sizeof(cp->addr));
3531
3532 hci_dev_lock(hdev);
3533
3534 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3535 if (err < 0)
3536 status = MGMT_STATUS_FAILED;
3537 else
3538 status = MGMT_STATUS_SUCCESS;
3539
3540 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3541 &cp->addr, sizeof(cp->addr));
3542
3543 hci_dev_unlock(hdev);
3544
3545 return err;
3546 }
3547
3548 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3549 u16 len)
3550 {
3551 struct mgmt_cp_unblock_device *cp = data;
3552 u8 status;
3553 int err;
3554
3555 BT_DBG("%s", hdev->name);
3556
3557 if (!bdaddr_type_is_valid(cp->addr.type))
3558 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3559 MGMT_STATUS_INVALID_PARAMS,
3560 &cp->addr, sizeof(cp->addr));
3561
3562 hci_dev_lock(hdev);
3563
3564 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3565 if (err < 0)
3566 status = MGMT_STATUS_INVALID_PARAMS;
3567 else
3568 status = MGMT_STATUS_SUCCESS;
3569
3570 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3571 &cp->addr, sizeof(cp->addr));
3572
3573 hci_dev_unlock(hdev);
3574
3575 return err;
3576 }
3577
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record
 * (source, vendor, product, version) and refresh the EIR data so
 * the new ID is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Best-effort EIR update; its result is intentionally ignored */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
3611
/* Request-completion callback for set_advertising(): resolve all
 * pending SET_ADVERTISING commands and, on success, broadcast the
 * new settings.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp leaves a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);
}
3632
/* Set Advertising mgmt command handler: toggles LE advertising. When
 * no HCI traffic is needed (powered off, no change, or an active LE
 * connection) only the flag is toggled and a response sent directly;
 * otherwise the enable/disable is requested from the controller and
 * completion is handled in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support to be enabled first. */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Don't race with another advertising change or an LE toggle
	 * still in flight.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3710
/* Set Static Address mgmt command handler. Only allowed while the
 * controller is powered off. A non-BDADDR_ANY address must not be
 * BDADDR_NONE and must follow the static address format; BDADDR_ANY
 * clears the configured address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the address while powered would pull the random
	 * address out from under active LE state.
	 */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
3750
3751 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3752 void *data, u16 len)
3753 {
3754 struct mgmt_cp_set_scan_params *cp = data;
3755 __u16 interval, window;
3756 int err;
3757
3758 BT_DBG("%s", hdev->name);
3759
3760 if (!lmp_le_capable(hdev))
3761 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3762 MGMT_STATUS_NOT_SUPPORTED);
3763
3764 interval = __le16_to_cpu(cp->interval);
3765
3766 if (interval < 0x0004 || interval > 0x4000)
3767 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3768 MGMT_STATUS_INVALID_PARAMS);
3769
3770 window = __le16_to_cpu(cp->window);
3771
3772 if (window < 0x0004 || window > 0x4000)
3773 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3774 MGMT_STATUS_INVALID_PARAMS);
3775
3776 if (window > interval)
3777 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3778 MGMT_STATUS_INVALID_PARAMS);
3779
3780 hci_dev_lock(hdev);
3781
3782 hdev->le_scan_interval = interval;
3783 hdev->le_scan_window = window;
3784
3785 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3786
3787 hci_dev_unlock(hdev);
3788
3789 return err;
3790 }
3791
/* Completion callback for the page-scan commands issued by
 * set_fast_connectable(). Syncs HCI_FAST_CONNECTABLE with the
 * requested value and answers the pending mgmt command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending cmd. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3824
/* Set Fast Connectable mgmt command handler: adjusts page scan
 * parameters for faster incoming connections. Requires BR/EDR to be
 * enabled, a 1.2-or-newer controller, and the device to be powered
 * and connectable.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: reply with the current
	 * settings without touching the controller.
	 */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3889
3890 static void set_bredr_scan(struct hci_request *req)
3891 {
3892 struct hci_dev *hdev = req->hdev;
3893 u8 scan = 0;
3894
3895 /* Ensure that fast connectable is disabled. This function will
3896 * not do anything if the page scan parameters are already what
3897 * they should be.
3898 */
3899 write_fast_connectable(req, false);
3900
3901 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3902 scan |= SCAN_PAGE;
3903 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3904 scan |= SCAN_INQUIRY;
3905
3906 if (scan)
3907 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3908 }
3909
/* Completion callback for the request built in set_bredr(). On
 * failure the HCI_BREDR_ENABLED flag (set optimistically by
 * set_bredr) is reverted before reporting the error.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3941
/* Set BR/EDR mgmt command handler: enables (or, while powered off,
 * disables) BR/EDR on a dual-mode controller. LE must already be
 * enabled, and disabling BR/EDR is rejected while powered on.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just reply with the current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only
		 * settings.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4031
/* Set Secure Connections mgmt command handler. Requires BR/EDR
 * support plus either controller SC support or the force-SC debug
 * flag. While powered off only the HCI_SC_ENABLED flag is toggled;
 * while powered the Write SC Support command is sent and the flag is
 * updated on completion.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* Powered off: only record the desired state. */
		if (cp->val)
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
		else
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* No change needed: reply with the current settings. */
	if (!!cp->val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4105
4106 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4107 {
4108 if (key->authenticated != 0x00 && key->authenticated != 0x01)
4109 return false;
4110 if (key->master != 0x00 && key->master != 0x01)
4111 return false;
4112 if (!bdaddr_type_is_le(key->addr.type))
4113 return false;
4114 return true;
4115 }
4116
4117 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4118 void *cp_data, u16 len)
4119 {
4120 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4121 u16 key_count, expected_len;
4122 int i, err;
4123
4124 BT_DBG("request for %s", hdev->name);
4125
4126 if (!lmp_le_capable(hdev))
4127 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4128 MGMT_STATUS_NOT_SUPPORTED);
4129
4130 key_count = __le16_to_cpu(cp->key_count);
4131
4132 expected_len = sizeof(*cp) + key_count *
4133 sizeof(struct mgmt_ltk_info);
4134 if (expected_len != len) {
4135 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4136 len, expected_len);
4137 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4138 MGMT_STATUS_INVALID_PARAMS);
4139 }
4140
4141 BT_DBG("%s key_count %u", hdev->name, key_count);
4142
4143 for (i = 0; i < key_count; i++) {
4144 struct mgmt_ltk_info *key = &cp->keys[i];
4145
4146 if (!ltk_is_valid(key))
4147 return cmd_status(sk, hdev->id,
4148 MGMT_OP_LOAD_LONG_TERM_KEYS,
4149 MGMT_STATUS_INVALID_PARAMS);
4150 }
4151
4152 hci_dev_lock(hdev);
4153
4154 hci_smp_ltks_clear(hdev);
4155
4156 for (i = 0; i < key_count; i++) {
4157 struct mgmt_ltk_info *key = &cp->keys[i];
4158 u8 type, addr_type;
4159
4160 if (key->addr.type == BDADDR_LE_PUBLIC)
4161 addr_type = ADDR_LE_DEV_PUBLIC;
4162 else
4163 addr_type = ADDR_LE_DEV_RANDOM;
4164
4165 if (key->master)
4166 type = HCI_SMP_LTK;
4167 else
4168 type = HCI_SMP_LTK_SLAVE;
4169
4170 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4171 type, 0, key->authenticated, key->val,
4172 key->enc_size, key->ediv, key->rand);
4173 }
4174
4175 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4176 NULL, 0);
4177
4178 hci_dev_unlock(hdev);
4179
4180 return err;
4181 }
4182
/* Dispatch table for mgmt commands, indexed by opcode (must stay in
 * opcode order). data_len is the exact expected parameter length, or
 * the minimum length when var_len is true (variable-length commands
 * such as the key-loading ones).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
};
4236
4237
/* Entry point for raw mgmt commands arriving over the HCI control
 * socket. Copies the message, validates header, index and parameter
 * length against the handler table and dispatches to the per-opcode
 * handler. Returns msglen on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, or claimed by a user
		 * channel, are not addressable over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below READ_INFO are global and must not carry an
	 * index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands specify a minimum size,
	 * fixed-length ones an exact size.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4330
4331 void mgmt_index_added(struct hci_dev *hdev)
4332 {
4333 if (hdev->dev_type != HCI_BREDR)
4334 return;
4335
4336 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4337 }
4338
4339 void mgmt_index_removed(struct hci_dev *hdev)
4340 {
4341 u8 status = MGMT_STATUS_INVALID_INDEX;
4342
4343 if (hdev->dev_type != HCI_BREDR)
4344 return;
4345
4346 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4347
4348 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4349 }
4350
4351 static void powered_complete(struct hci_dev *hdev, u8 status)
4352 {
4353 struct cmd_lookup match = { NULL, hdev };
4354
4355 BT_DBG("status 0x%02x", status);
4356
4357 hci_dev_lock(hdev);
4358
4359 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4360
4361 new_settings(hdev, match.sk);
4362
4363 hci_dev_unlock(hdev);
4364
4365 if (match.sk)
4366 sock_put(match.sk);
4367 }
4368
/* Build and run the HCI request that brings the controller in line
 * with the mgmt settings after power-on. Returns the hci_req_run()
 * result; a non-zero value means nothing was queued and
 * powered_complete() will not run.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if requested but not yet set. */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication-enable setting with the
	 * mgmt link-security flag.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4433
/* Notify mgmt of a power state change. On power-on the controller is
 * first synced via powered_update_hci(); if that queued commands, the
 * settings event is emitted from powered_complete() instead. On
 * power-off all pending commands fail and a zeroed class-of-device
 * may be announced.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* Zero means HCI commands were queued; the completion
		 * callback will send responses and settings.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Everything else still pending fails with "not powered". */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4468
4469 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4470 {
4471 struct pending_cmd *cmd;
4472 u8 status;
4473
4474 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4475 if (!cmd)
4476 return;
4477
4478 if (err == -ERFKILL)
4479 status = MGMT_STATUS_RFKILLED;
4480 else
4481 status = MGMT_STATUS_FAILED;
4482
4483 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4484
4485 mgmt_pending_remove(cmd);
4486 }
4487
/* Called when the discoverable timeout fires: clears the discoverable
 * flags, turns off inquiry scan in the controller and announces the
 * new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan enabled but drop inquiry scan. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4518
/* Track a discoverable state change reported by the controller and
 * broadcast new settings when the flag actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Losing discoverable also ends limited discoverable. */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4551
4552 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4553 {
4554 bool changed;
4555
4556 /* Nothing needed here if there's a pending command since that
4557 * commands request completion callback takes care of everything
4558 * necessary.
4559 */
4560 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4561 return;
4562
4563 if (connectable)
4564 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4565 else
4566 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4567
4568 if (changed)
4569 new_settings(hdev, NULL);
4570 }
4571
4572 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4573 {
4574 u8 mgmt_err = mgmt_status(status);
4575
4576 if (scan & SCAN_PAGE)
4577 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4578 cmd_status_rsp, &mgmt_err);
4579
4580 if (scan & SCAN_INQUIRY)
4581 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4582 cmd_status_rsp, &mgmt_err);
4583 }
4584
4585 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4586 bool persistent)
4587 {
4588 struct mgmt_ev_new_link_key ev;
4589
4590 memset(&ev, 0, sizeof(ev));
4591
4592 ev.store_hint = persistent;
4593 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4594 ev.key.addr.type = BDADDR_BREDR;
4595 ev.key.type = key->type;
4596 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4597 ev.key.pin_len = key->pin_len;
4598
4599 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4600 }
4601
4602 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
4603 {
4604 struct mgmt_ev_new_long_term_key ev;
4605
4606 memset(&ev, 0, sizeof(ev));
4607
4608 ev.store_hint = persistent;
4609 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4610 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4611 ev.key.authenticated = key->authenticated;
4612 ev.key.enc_size = key->enc_size;
4613 ev.key.ediv = key->ediv;
4614
4615 if (key->type == HCI_SMP_LTK)
4616 ev.key.master = 1;
4617
4618 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4619 memcpy(ev.key.val, key->val, sizeof(key->val));
4620
4621 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4622 }
4623
4624 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4625 u8 data_len)
4626 {
4627 eir[eir_len++] = sizeof(type) + data_len;
4628 eir[eir_len++] = type;
4629 memcpy(&eir[eir_len], data, data_len);
4630 eir_len += data_len;
4631
4632 return eir_len;
4633 }
4634
/* Emit a Device Connected event, attaching the remote name and class
 * of device (when known) as EIR-formatted data.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* NOTE(review): assumes the event header plus name_len + 2 and
	 * 5 bytes of class EIR always fits in 512 bytes — callers
	 * appear to bound name_len; confirm against call sites.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* An all-zero class of device is treated as "not known". */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4661
/* mgmt_pending_foreach callback: complete a pending Disconnect
 * command successfully and hand its socket (with a held reference)
 * back to the caller through *data.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller is responsible for the matching sock_put(). */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
4679
/* mgmt_pending_foreach callback: finish a pending Unpair Device
 * command, emitting the Device Unpaired event before the response.
 * data carries the hci_dev pointer.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4696
/* Emit a Device Disconnected event. Pending Disconnect commands are
 * completed first so their socket can be excluded; pending Unpair
 * Device commands are finished afterwards.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only ACL and LE links are reported over mgmt. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* sk (set by disconnect_rsp) is passed to mgmt_event —
	 * presumably to skip the originating socket; confirm against
	 * mgmt_event's semantics.
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
4720
/* Handle a failed disconnect attempt: finish pending Unpair Device
 * commands and complete a matching pending Disconnect command with
 * the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* The pending command must match this exact address and type. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4752
4753 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4754 u8 addr_type, u8 status)
4755 {
4756 struct mgmt_ev_connect_failed ev;
4757
4758 bacpy(&ev.addr.bdaddr, bdaddr);
4759 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4760 ev.status = mgmt_status(status);
4761
4762 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4763 }
4764
4765 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4766 {
4767 struct mgmt_ev_pin_code_request ev;
4768
4769 bacpy(&ev.addr.bdaddr, bdaddr);
4770 ev.addr.type = BDADDR_BREDR;
4771 ev.secure = secure;
4772
4773 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4774 }
4775
4776 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4777 u8 status)
4778 {
4779 struct pending_cmd *cmd;
4780 struct mgmt_rp_pin_code_reply rp;
4781
4782 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4783 if (!cmd)
4784 return;
4785
4786 bacpy(&rp.addr.bdaddr, bdaddr);
4787 rp.addr.type = BDADDR_BREDR;
4788
4789 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4790 mgmt_status(status), &rp, sizeof(rp));
4791
4792 mgmt_pending_remove(cmd);
4793 }
4794
4795 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4796 u8 status)
4797 {
4798 struct pending_cmd *cmd;
4799 struct mgmt_rp_pin_code_reply rp;
4800
4801 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4802 if (!cmd)
4803 return;
4804
4805 bacpy(&rp.addr.bdaddr, bdaddr);
4806 rp.addr.type = BDADDR_BREDR;
4807
4808 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4809 mgmt_status(status), &rp, sizeof(rp));
4810
4811 mgmt_pending_remove(cmd);
4812 }
4813
4814 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4815 u8 link_type, u8 addr_type, __le32 value,
4816 u8 confirm_hint)
4817 {
4818 struct mgmt_ev_user_confirm_request ev;
4819
4820 BT_DBG("%s", hdev->name);
4821
4822 bacpy(&ev.addr.bdaddr, bdaddr);
4823 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4824 ev.confirm_hint = confirm_hint;
4825 ev.value = value;
4826
4827 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4828 NULL);
4829 }
4830
4831 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4832 u8 link_type, u8 addr_type)
4833 {
4834 struct mgmt_ev_user_passkey_request ev;
4835
4836 BT_DBG("%s", hdev->name);
4837
4838 bacpy(&ev.addr.bdaddr, bdaddr);
4839 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4840
4841 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4842 NULL);
4843 }
4844
/* Complete a pending user-pairing response command (@opcode is one of
 * the User Confirm / User Passkey reply opcodes).
 *
 * Returns -ENOENT when no such command is pending, otherwise the
 * result of cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
4866
/* Completion of a User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Completion of a User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Completion of a User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Completion of a User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
4896
4897 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4898 u8 link_type, u8 addr_type, u32 passkey,
4899 u8 entered)
4900 {
4901 struct mgmt_ev_passkey_notify ev;
4902
4903 BT_DBG("%s", hdev->name);
4904
4905 bacpy(&ev.addr.bdaddr, bdaddr);
4906 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4907 ev.passkey = __cpu_to_le32(passkey);
4908 ev.entered = entered;
4909
4910 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4911 }
4912
4913 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4914 u8 addr_type, u8 status)
4915 {
4916 struct mgmt_ev_auth_failed ev;
4917
4918 bacpy(&ev.addr.bdaddr, bdaddr);
4919 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4920 ev.status = mgmt_status(status);
4921
4922 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4923 }
4924
/* Handle completion of the HCI authentication-enable change.
 *
 * On failure, all pending Set Link Security commands are answered with
 * the translated error status.  On success the HCI_LINK_SECURITY mgmt
 * flag is synced to the controller's HCI_AUTH state, pending commands
 * are completed with the current settings, and a New Settings event is
 * emitted if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt dev_flags; the
	 * test_and_* variants tell us whether anything changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
4953
4954 static void clear_eir(struct hci_request *req)
4955 {
4956 struct hci_dev *hdev = req->hdev;
4957 struct hci_cp_write_eir cp;
4958
4959 if (!lmp_ext_inq_capable(hdev))
4960 return;
4961
4962 memset(hdev->eir, 0, sizeof(hdev->eir));
4963
4964 memset(&cp, 0, sizeof(cp));
4965
4966 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4967 }
4968
/* Handle completion of the HCI Simple Pairing Mode change.
 *
 * On failure: if we were trying to enable SSP, roll back the
 * HCI_SSP_ENABLED flag (and HS, which depends on SSP) and emit New
 * Settings; then answer all pending Set SSP commands with the error.
 *
 * On success: sync HCI_SSP_ENABLED to the requested state — disabling
 * SSP also forces HCI_HS_ENABLED off — complete pending commands, emit
 * New Settings if anything changed, and update or clear the EIR data
 * to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			/* HS requires SSP, so it goes down with it */
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			/* SSP was already off; a leftover HS flag still
			 * counts as a settings change.
			 */
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5017
/* Handle completion of the Secure Connections enable change.
 *
 * On failure, roll back HCI_SC_ENABLED if we were enabling (emitting
 * New Settings on an actual change) and answer all pending Set Secure
 * Connections commands with the error.  On success, sync the flag to
 * the requested state, complete pending commands and emit New Settings
 * if the flag changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable && test_and_clear_bit(HCI_SC_ENABLED,
						 &hdev->dev_flags))
			new_settings(hdev, NULL);

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable)
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5049
5050 static void sk_lookup(struct pending_cmd *cmd, void *data)
5051 {
5052 struct cmd_lookup *match = data;
5053
5054 if (match->sk == NULL) {
5055 match->sk = cmd->sk;
5056 sock_hold(match->sk);
5057 }
5058 }
5059
/* Handle completion of a Class of Device update.
 *
 * Grabs the socket of any pending Set Dev Class / Add UUID / Remove
 * UUID command (all three can trigger a CoD write) and, on success,
 * broadcasts the 3-byte class in a Class Of Dev Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5076
/* Handle completion of a local name change.
 *
 * If no Set Local Name command is pending, the change came from the
 * controller/HCI side: store the name in hdev->dev_name and — when a
 * Set Powered command is in flight — suppress the event, since the
 * name write is part of the power-on sequence.  Otherwise emit a Local
 * Name Changed event, skipping the requesting socket if known.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5103
/* Complete a pending Read Local OOB Data command.
 *
 * Two response formats exist: when Secure Connections is enabled and
 * the controller returned 256-bit hash/randomizer values, the extended
 * reply carrying both the 192-bit and 256-bit pairs is used; otherwise
 * the legacy reply with only the 192-bit pair is sent.  On error the
 * command is answered with the translated HCI status instead.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended reply: both 192-bit and 256-bit data */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply: 192-bit data only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5150
/* Report a discovered remote device to userspace as a Device Found
 * event.
 *
 * The event is assembled in a 512-byte stack buffer: fixed header plus
 * the supplied EIR data, with room reserved for appending a Class of
 * Device field when @dev_class is given and the EIR does not already
 * contain one.  Events that would not fit are silently dropped, as are
 * reports received while no discovery is active.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	/* Flag devices whose name still needs confirmation, and those
	 * without SSP support (legacy pairing).
	 */
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5188
/* Report a resolved remote name to userspace as a Device Found event
 * whose EIR contains a single Complete Name field.
 *
 * NOTE(review): the stack buffer is sized for HCI_MAX_NAME_LENGTH plus
 * the 2-byte EIR field header, so this assumes callers pass
 * name_len <= HCI_MAX_NAME_LENGTH — confirm at the call sites.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
5211
/* Notify userspace of a discovery state change.
 *
 * First completes the matching pending command (Start Discovery when
 * discovery just began, Stop Discovery when it ended) with the
 * discovery type, then broadcasts a Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5238
5239 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5240 {
5241 struct pending_cmd *cmd;
5242 struct mgmt_ev_device_blocked ev;
5243
5244 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5245
5246 bacpy(&ev.addr.bdaddr, bdaddr);
5247 ev.addr.type = type;
5248
5249 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5250 cmd ? cmd->sk : NULL);
5251 }
5252
5253 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5254 {
5255 struct pending_cmd *cmd;
5256 struct mgmt_ev_device_unblocked ev;
5257
5258 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5259
5260 bacpy(&ev.addr.bdaddr, bdaddr);
5261 ev.addr.type = type;
5262
5263 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5264 cmd ? cmd->sk : NULL);
5265 }
5266
5267 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5268 {
5269 BT_DBG("%s status %u", hdev->name, status);
5270
5271 /* Clear the advertising mgmt setting if we failed to re-enable it */
5272 if (status) {
5273 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5274 new_settings(hdev, NULL);
5275 }
5276 }
5277
/* Re-enable LE advertising after it was implicitly stopped.
 *
 * Does nothing while any LE connection exists or when the advertising
 * mgmt setting is off.  If queueing/running the enable request fails,
 * the setting is cleared and New Settings is emitted so userspace
 * learns advertising is no longer active.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}