Source: git.proxmox.com — mirror_ubuntu-bionic-kernel.git, file net/bluetooth/mgmt.c
Commit subject: "Bluetooth: Ensure that background scanning gets enabled on power on"
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Opcodes of all mgmt commands this implementation handles.  The list
 * is reported verbatim (little-endian) in the MGMT_OP_READ_COMMANDS
 * reply, see read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
};
92
/* Opcodes of all mgmt events this implementation may emit.  Appended
 * after the command list in the MGMT_OP_READ_COMMANDS reply, see
 * read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
};
120
121 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
122
123 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
124 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
125
/* Book-keeping for an mgmt command whose reply is deferred until the
 * matching HCI activity completes.  Entries live on hdev->mgmt_pending
 * (see mgmt_pending_add()/mgmt_pending_remove()).
 */
struct pending_cmd {
	struct list_head list;	/* linkage into hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index (hdev->id) */
	void *param;		/* heap copy of the command parameters */
	struct sock *sk;	/* socket to reply to; a reference is held */
	void *user_data;	/* per-command private data, matched by
				 * mgmt_pending_find_data() */
};
134
135 /* HCI to MGMT error code conversion table */
136 static u8 mgmt_status_table[] = {
137 MGMT_STATUS_SUCCESS,
138 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
139 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
140 MGMT_STATUS_FAILED, /* Hardware Failure */
141 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
142 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
143 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
144 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
145 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
146 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
147 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
148 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
149 MGMT_STATUS_BUSY, /* Command Disallowed */
150 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
151 MGMT_STATUS_REJECTED, /* Rejected Security */
152 MGMT_STATUS_REJECTED, /* Rejected Personal */
153 MGMT_STATUS_TIMEOUT, /* Host Timeout */
154 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
155 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
156 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
157 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
158 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
159 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
160 MGMT_STATUS_BUSY, /* Repeated Attempts */
161 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
162 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
164 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
165 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
166 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
167 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
168 MGMT_STATUS_FAILED, /* Unspecified Error */
169 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
170 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
171 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
172 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
173 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
174 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
175 MGMT_STATUS_FAILED, /* Unit Link Key Used */
176 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
177 MGMT_STATUS_TIMEOUT, /* Instant Passed */
178 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
179 MGMT_STATUS_FAILED, /* Transaction Collision */
180 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
181 MGMT_STATUS_REJECTED, /* QoS Rejected */
182 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
183 MGMT_STATUS_REJECTED, /* Insufficient Security */
184 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
185 MGMT_STATUS_BUSY, /* Role Switch Pending */
186 MGMT_STATUS_FAILED, /* Slot Violation */
187 MGMT_STATUS_FAILED, /* Role Switch Failed */
188 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
189 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
190 MGMT_STATUS_BUSY, /* Host Busy Pairing */
191 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
192 MGMT_STATUS_BUSY, /* Controller Busy */
193 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
194 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
195 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
196 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
197 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
198 };
199
200 static u8 mgmt_status(u8 hci_status)
201 {
202 if (hci_status < ARRAY_SIZE(mgmt_status_table))
203 return mgmt_status_table[hci_status];
204
205 return MGMT_STATUS_FAILED;
206 }
207
/* Send a Command Status event to a single mgmt socket.
 *
 * Builds an skb holding an mgmt header (event MGMT_EV_CMD_STATUS with
 * the given controller index) followed by the status payload, and
 * queues it on the socket's receive queue.
 *
 * Returns 0 on success or a negative errno (the skb is freed on
 * failure).
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	/* All wire-format fields are little-endian */
	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not consumed on failure */

	return err;
}
237
/* Send a Command Complete event to a single mgmt socket.
 *
 * Like cmd_status() but additionally carries an rp_len-byte response
 * payload (@rp may be NULL when rp_len is 0).
 *
 * Returns 0 on success or a negative errno (the skb is freed on
 * failure).
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* The response parameters follow the fixed event header */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not consumed on failure */

	return err;
}
271
272 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
273 u16 data_len)
274 {
275 struct mgmt_rp_read_version rp;
276
277 BT_DBG("sock %p", sk);
278
279 rp.version = MGMT_VERSION;
280 rp.revision = cpu_to_le16(MGMT_REVISION);
281
282 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
283 sizeof(rp));
284 }
285
/* MGMT_OP_READ_COMMANDS handler: report the supported command and
 * event opcodes.  The reply is heap-allocated because its size depends
 * on the two opcode tables.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* opcode deliberately keeps advancing across both loops: the
	 * events are packed directly after the commands in rp->opcodes.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
319
/* MGMT_OP_READ_INDEX_LIST handler: report the indices of all usable
 * BR/EDR controllers.
 *
 * Two passes over hci_dev_list are done under the read lock: the first
 * counts every BR/EDR device to size the allocation, the second fills
 * in only the devices that are actually exposed to mgmt.  The first
 * count is deliberately broader, so the buffer can never be too small
 * for the filtered fill pass; rp_len is recomputed afterwards from the
 * number of entries actually written.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);	/* atomic: read lock held */
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip devices still in setup, claimed by a user
		 * channel, or marked as raw-only.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
375
/* Build the bitmask of mgmt settings this controller can support,
 * derived from its BR/EDR, SSP, SC and LE capabilities.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		/* Fast connectable needs interlaced scan (1.2 onwards) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC is a debugfs override for testing */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	return settings;
}
410
411 static u32 get_current_settings(struct hci_dev *hdev)
412 {
413 u32 settings = 0;
414
415 if (hdev_is_powered(hdev))
416 settings |= MGMT_SETTING_POWERED;
417
418 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
419 settings |= MGMT_SETTING_CONNECTABLE;
420
421 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
422 settings |= MGMT_SETTING_FAST_CONNECTABLE;
423
424 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
425 settings |= MGMT_SETTING_DISCOVERABLE;
426
427 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
428 settings |= MGMT_SETTING_PAIRABLE;
429
430 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_BREDR;
432
433 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
434 settings |= MGMT_SETTING_LE;
435
436 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
437 settings |= MGMT_SETTING_LINK_SECURITY;
438
439 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
440 settings |= MGMT_SETTING_SSP;
441
442 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
443 settings |= MGMT_SETTING_HS;
444
445 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
446 settings |= MGMT_SETTING_ADVERTISING;
447
448 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
449 settings |= MGMT_SETTING_SECURE_CONN;
450
451 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
452 settings |= MGMT_SETTING_DEBUG_KEYS;
453
454 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
455 settings |= MGMT_SETTING_PRIVACY;
456
457 return settings;
458 }
459
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs.
 *
 * Writes at most len bytes starting at data (len < 4 means not even a
 * header plus one UUID fits, so nothing is written).  The EIR type is
 * downgraded from UUID16_ALL to UUID16_SOME when the list is cut short
 * by lack of space.  Returns a pointer just past the written bytes.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 128-bit UUIDs from the Bluetooth base range carry
		 * the 16-bit alias at offset 12.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Lazily emit the field header on first match;
			 * length byte starts at 1 (the type byte) and
			 * grows as UUIDs are appended.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
503
/* Append an EIR field listing the registered 32-bit service UUIDs.
 * Same layout rules as create_uuid16_list(); len < 6 (header plus one
 * 32-bit UUID) writes nothing.  Returns a pointer past the last byte
 * written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			/* Header is emitted lazily on first match */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives at offset 12 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
536
/* Append an EIR field listing the registered 128-bit service UUIDs.
 * Same layout rules as create_uuid16_list(); len < 18 (header plus one
 * 16-byte UUID) writes nothing.  Returns a pointer past the last byte
 * written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			/* Header is emitted lazily on first match */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
569
570 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
571 {
572 struct pending_cmd *cmd;
573
574 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
575 if (cmd->opcode == opcode)
576 return cmd;
577 }
578
579 return NULL;
580 }
581
582 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
583 struct hci_dev *hdev,
584 const void *data)
585 {
586 struct pending_cmd *cmd;
587
588 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
589 if (cmd->user_data != data)
590 continue;
591 if (cmd->opcode == opcode)
592 return cmd;
593 }
594
595 return NULL;
596 }
597
/* Build LE scan response data carrying the local device name,
 * shortened (EIR_NAME_SHORT) if it does not fit in the remaining
 * advertising space.  Returns the number of bytes written to ptr.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* 2 bytes for the length and type fields */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers type byte + name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
623
/* Queue an HCI command refreshing the controller's LE scan response
 * data, but only when LE is enabled and the data actually changed
 * since the last update (hdev->scan_rsp_data acts as a cache).
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
648
649 static u8 get_adv_discov_flags(struct hci_dev *hdev)
650 {
651 struct pending_cmd *cmd;
652
653 /* If there's a pending mgmt command the flags will not yet have
654 * their final values, so check for this first.
655 */
656 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
657 if (cmd) {
658 struct mgmt_mode *cp = cmd->param;
659 if (cp->val == 0x01)
660 return LE_AD_GENERAL;
661 else if (cp->val == 0x02)
662 return LE_AD_LIMITED;
663 } else {
664 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
665 return LE_AD_LIMITED;
666 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
667 return LE_AD_GENERAL;
668 }
669
670 return 0;
671 }
672
/* Build LE advertising data: a flags field (discoverability plus
 * LE_AD_NO_BREDR when BR/EDR is disabled) and, when known, the
 * advertising TX power.  Returns the number of bytes written to ptr.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;		/* field length: type + 1 byte */
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
704
/* Queue an HCI command refreshing the controller's LE advertising
 * data, but only when LE is enabled and the data actually changed
 * since the last update (hdev->adv_data acts as a cache).
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
729
/* Build the extended inquiry response (EIR) payload into data: local
 * name (truncated to 48 bytes with EIR_NAME_SHORT if longer), inquiry
 * TX power when known, Device ID when configured, then the 16-, 32-
 * and 128-bit service UUID lists in whatever space remains.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version (LE) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each helper consumes only the space still available */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
777
/* Queue a Write Extended Inquiry Response command when the EIR data
 * changed.  Skipped while unpowered, when the controller lacks EIR
 * support, when SSP is disabled (EIR requires SSP), or while the
 * service cache is still active (service_cache_off() refreshes later).
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
806
807 static u8 get_service_classes(struct hci_dev *hdev)
808 {
809 struct bt_uuid *uuid;
810 u8 val = 0;
811
812 list_for_each_entry(uuid, &hdev->uuids, list)
813 val |= uuid->svc_hint;
814
815 return val;
816 }
817
/* Queue a Write Class of Device command when the class changed.
 * Skipped while unpowered, when BR/EDR is disabled, or while the
 * service cache is active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* 0x20: limited-discoverable bit of the Class of Device */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round-trip if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
846
847 static bool get_connectable(struct hci_dev *hdev)
848 {
849 struct pending_cmd *cmd;
850
851 /* If there's a pending mgmt command the flag will not yet have
852 * it's final value, so check for this first.
853 */
854 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
855 if (cmd) {
856 struct mgmt_mode *cp = cmd->param;
857 return cp->val;
858 }
859
860 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
861 }
862
/* Queue the HCI commands that configure and enable LE advertising.
 * The advertising type (connectable vs. non-connectable) follows the
 * connectable setting, and the own-address type is chosen by
 * hci_update_random_address().
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;	/* no usable address; abort without queuing */

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
897
898 static void disable_advertising(struct hci_request *req)
899 {
900 u8 enable = 0x00;
901
902 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
903 }
904
/* Delayed-work handler that expires the service cache: once the
 * HCI_SERVICE_CACHE flag is cleared, push the (possibly deferred) EIR
 * and class-of-device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache was not active */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run outside the device lock */
	hci_req_run(&req, NULL);
}
925
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA as expired and, when advertising with no LE connections up,
 * restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Only cycle advertising; an active LE link keeps the RPA */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
951
/* One-time per-controller mgmt initialisation, performed the first
 * time a mgmt command addresses the device (HCI_MGMT gates the
 * test_and_set so repeat calls are no-ops).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
967
/* MGMT_OP_READ_INFO handler: report the controller's address, HCI
 * version, manufacturer, supported/current settings, device class and
 * names.  The snapshot is taken under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
997
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the command
 * itself.  Caller must already have unlinked it from the list.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1004
/* Create a pending command entry for @opcode, copying @len bytes of
 * parameters from @data (if non-NULL), take a reference on @sk, and
 * link the entry into hdev->mgmt_pending.
 *
 * Returns the new entry, or NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	/* Reference dropped in mgmt_pending_free() */
	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1034
1035 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1036 void (*cb)(struct pending_cmd *cmd,
1037 void *data),
1038 void *data)
1039 {
1040 struct pending_cmd *cmd, *tmp;
1041
1042 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1043 if (opcode > 0 && cmd->opcode != opcode)
1044 continue;
1045
1046 cb(cmd, data);
1047 }
1048 }
1049
/* Unlink a pending command from hdev->mgmt_pending and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1055
/* Reply to a settings-changing command with a Command Complete event
 * carrying the controller's current settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1063
/* Completion callback for clean_up_hci_state(): once all connections
 * are gone, fast-track the pending power-off work instead of waiting
 * for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1073
/* Queue the HCI commands needed to stop whatever discovery activity is
 * currently running: inquiry or LE scan while finding, a remote name
 * request while resolving, or a passive LE scan otherwise.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop the auto-disable timer too */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;	/* no name request outstanding */

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1110
/* Queue all the HCI commands needed to bring the controller to a quiet
 * state before powering off: disable page/inquiry scan, stop
 * advertising and discovery, then disconnect, cancel or reject every
 * connection depending on its state.
 *
 * Returns the result of hci_req_run() (-ENODATA when nothing had to be
 * queued); clean_up_hci_complete() runs when the request finishes.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links: disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempts: cancel */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming requests: reject */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1163
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 *
 * The reply is deferred via a pending command entry; the eventual
 * power state change completes it.  Only one SET_POWERED may be in
 * flight per controller.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Auto-off pending: the device is up but about to be turned
	 * off.  Powering on just has to cancel that and confirm.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1229
1230 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1231 struct sock *skip_sk)
1232 {
1233 struct sk_buff *skb;
1234 struct mgmt_hdr *hdr;
1235
1236 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1237 if (!skb)
1238 return -ENOMEM;
1239
1240 hdr = (void *) skb_put(skb, sizeof(*hdr));
1241 hdr->opcode = cpu_to_le16(event);
1242 if (hdev)
1243 hdr->index = cpu_to_le16(hdev->id);
1244 else
1245 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1246 hdr->len = cpu_to_le16(data_len);
1247
1248 if (data)
1249 memcpy(skb_put(skb, data_len), data, data_len);
1250
1251 /* Time stamp */
1252 __net_timestamp(skb);
1253
1254 hci_send_to_control(skb, skip_sk);
1255 kfree_skb(skb);
1256
1257 return 0;
1258 }
1259
1260 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1261 {
1262 __le32 ev;
1263
1264 ev = cpu_to_le32(get_current_settings(hdev));
1265
1266 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1267 }
1268
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp().
 */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket (held) or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1274
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and remember the first responded socket in the
 * cmd_lookup so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Take a reference on the first socket; the caller is
	 * responsible for the matching sock_put().
	 */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1290
1291 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1292 {
1293 u8 *status = data;
1294
1295 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1296 mgmt_pending_remove(cmd);
1297 }
1298
1299 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1300 {
1301 if (!lmp_bredr_capable(hdev))
1302 return MGMT_STATUS_NOT_SUPPORTED;
1303 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1304 return MGMT_STATUS_REJECTED;
1305 else
1306 return MGMT_STATUS_SUCCESS;
1307 }
1308
1309 static u8 mgmt_le_support(struct hci_dev *hdev)
1310 {
1311 if (!lmp_le_capable(hdev))
1312 return MGMT_STATUS_NOT_SUPPORTED;
1313 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1314 return MGMT_STATUS_REJECTED;
1315 else
1316 return MGMT_STATUS_SUCCESS;
1317 }
1318
/* HCI request completion handler for Set Discoverable: commit the
 * HCI_DISCOVERABLE flag, arm the discoverable timeout, reply to the
 * pending command and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag set by set_discoverable() */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout now that discoverable is active */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1375
/* Handle the MGMT Set Discoverable command (val: 0x00 off, 0x01
 * general, 0x02 limited; timeout in seconds, little-endian).
 *
 * For BR/EDR this programs the IAC LAPs and inquiry scan; LE-only
 * controllers just get updated advertising data. The discoverable
 * timeout is armed from set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only accepted while connectable is set */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1540
/* Queue HCI commands that adjust the page scan interval/window and
 * type for fast (or default) connection setup. Commands are only added
 * for values that differ from the controller's current state.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands need Bluetooth 1.2 or later */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Compare in little-endian form against the cached host values */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1575
1576 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1577 {
1578 struct pending_cmd *cmd;
1579 struct mgmt_mode *cp;
1580 bool changed;
1581
1582 BT_DBG("status 0x%02x", status);
1583
1584 hci_dev_lock(hdev);
1585
1586 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1587 if (!cmd)
1588 goto unlock;
1589
1590 if (status) {
1591 u8 mgmt_err = mgmt_status(status);
1592 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1593 goto remove_cmd;
1594 }
1595
1596 cp = cmd->param;
1597 if (cp->val)
1598 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1599 else
1600 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1601
1602 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1603
1604 if (changed)
1605 new_settings(hdev, cmd->sk);
1606
1607 remove_cmd:
1608 mgmt_pending_remove(cmd);
1609
1610 unlock:
1611 hci_dev_unlock(hdev);
1612 }
1613
1614 static int set_connectable_update_settings(struct hci_dev *hdev,
1615 struct sock *sk, u8 val)
1616 {
1617 bool changed = false;
1618 int err;
1619
1620 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1621 changed = true;
1622
1623 if (val) {
1624 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1625 } else {
1626 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1627 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1628 }
1629
1630 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1631 if (err < 0)
1632 return err;
1633
1634 if (changed)
1635 return new_settings(hdev, sk);
1636
1637 return 0;
1638 }
1639
/* Handle the MGMT Set Connectable command: enable/disable page scan
 * for BR/EDR and/or update advertising flags for LE.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the stored setting needs updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable also cancels a pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it reflects the new connectable state,
	 * but only while no LE connection exists.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: no HCI commands were needed, so the stored
		 * settings can be updated directly.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1734
1735 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1736 u16 len)
1737 {
1738 struct mgmt_mode *cp = data;
1739 bool changed;
1740 int err;
1741
1742 BT_DBG("request for %s", hdev->name);
1743
1744 if (cp->val != 0x00 && cp->val != 0x01)
1745 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1746 MGMT_STATUS_INVALID_PARAMS);
1747
1748 hci_dev_lock(hdev);
1749
1750 if (cp->val)
1751 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1752 else
1753 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1754
1755 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1756 if (err < 0)
1757 goto unlock;
1758
1759 if (changed)
1760 err = new_settings(hdev, sk);
1761
1762 unlock:
1763 hci_dev_unlock(hdev);
1764 return err;
1765 }
1766
/* Handle the MGMT Set Link Security command (legacy authentication).
 * When powered and the state differs, sends HCI Write Auth Enable; the
 * flag is committed from the command complete handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just record the desired setting */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested authentication state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1836
/* Handle the MGMT Set Secure Simple Pairing command. When powered, the
 * new mode is written to the controller and the flag is committed from
 * the command complete handler; when powered off only the stored flags
 * are toggled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: disabling SSP also clears HS (HS needs SSP) */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When disabling SSP also turn off debug mode (cp->val is 0x00
	 * here, which disables it).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1918
/* Handle the MGMT Set High Speed command. HS is a host-side setting
 * that requires SSP; disabling it while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed requires SSP to be enabled first */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only accepted while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1969
/* HCI request completion handler for Set LE: respond to all pending
 * Set LE commands and refresh advertising/scan response data if LE is
 * (still) enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2007
/* Handle the MGMT Set Low Energy command: update the host LE setting
 * and, when the controller state needs changing, send HCI Write LE
 * Host Supported (disabling advertising first when turning LE off).
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed when powered off or when the controller
	 * already reports the requested host LE state.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly drops advertising too */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2096
2097 /* This is a helper function to test for pending mgmt commands that can
2098 * cause CoD or EIR HCI commands. We can only allow one such pending
2099 * mgmt command at a time since otherwise we cannot easily track what
2100 * the current values are, will be, and based on that calculate if a new
2101 * HCI command needs to be sent and if yes with what value.
2102 */
2103 static bool pending_eir_or_class(struct hci_dev *hdev)
2104 {
2105 struct pending_cmd *cmd;
2106
2107 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2108 switch (cmd->opcode) {
2109 case MGMT_OP_ADD_UUID:
2110 case MGMT_OP_REMOVE_UUID:
2111 case MGMT_OP_SET_DEV_CLASS:
2112 case MGMT_OP_SET_POWERED:
2113 return true;
2114 }
2115 }
2116
2117 return false;
2118 }
2119
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2124
2125 static u8 get_uuid_size(const u8 *uuid)
2126 {
2127 u32 val;
2128
2129 if (memcmp(uuid, bluetooth_base_uuid, 12))
2130 return 128;
2131
2132 val = get_unaligned_le32(&uuid[12]);
2133 if (val > 0xffff)
2134 return 32;
2135
2136 return 16;
2137 }
2138
2139 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2140 {
2141 struct pending_cmd *cmd;
2142
2143 hci_dev_lock(hdev);
2144
2145 cmd = mgmt_pending_find(mgmt_op, hdev);
2146 if (!cmd)
2147 goto unlock;
2148
2149 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2150 hdev->dev_class, 3);
2151
2152 mgmt_pending_remove(cmd);
2153
2154 unlock:
2155 hci_dev_unlock(hdev);
2156 }
2157
/* HCI request completion handler for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2164
/* Handle the MGMT Add UUID command: record the UUID in the device's
 * list, then refresh class of device and EIR data on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI changes were necessary; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2222
2223 static bool enable_service_cache(struct hci_dev *hdev)
2224 {
2225 if (!hdev_is_powered(hdev))
2226 return false;
2227
2228 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2229 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2230 CACHE_TIMEOUT);
2231 return true;
2232 }
2233
2234 return false;
2235 }
2236
/* HCI request completion handler for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2243
/* Handle the MGMT Remove UUID command. An all-zero UUID clears the
 * whole list; otherwise every matching entry is removed. The class of
 * device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* While the service cache timer is freshly armed, no
		 * controller update is needed yet.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI changes were necessary; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2321
/* HCI request completion handler for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2328
/* Handle the MGMT Set Device Class command: store the new major/minor
 * class and, when powered, write the class of device (and EIR, if the
 * service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other commands that touch CoD/EIR */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major must be 0 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values are enough, reply right away */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock around the synchronous cancel —
		 * NOTE(review): presumably because the service_cache
		 * work also takes the hdev lock; verify against the
		 * work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI changes were necessary; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2399
2400 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2401 u16 len)
2402 {
2403 struct mgmt_cp_load_link_keys *cp = data;
2404 u16 key_count, expected_len;
2405 bool changed;
2406 int i;
2407
2408 BT_DBG("request for %s", hdev->name);
2409
2410 if (!lmp_bredr_capable(hdev))
2411 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2412 MGMT_STATUS_NOT_SUPPORTED);
2413
2414 key_count = __le16_to_cpu(cp->key_count);
2415
2416 expected_len = sizeof(*cp) + key_count *
2417 sizeof(struct mgmt_link_key_info);
2418 if (expected_len != len) {
2419 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2420 expected_len, len);
2421 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2422 MGMT_STATUS_INVALID_PARAMS);
2423 }
2424
2425 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2426 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2427 MGMT_STATUS_INVALID_PARAMS);
2428
2429 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2430 key_count);
2431
2432 for (i = 0; i < key_count; i++) {
2433 struct mgmt_link_key_info *key = &cp->keys[i];
2434
2435 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2436 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2437 MGMT_STATUS_INVALID_PARAMS);
2438 }
2439
2440 hci_dev_lock(hdev);
2441
2442 hci_link_keys_clear(hdev);
2443
2444 if (cp->debug_keys)
2445 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2446 &hdev->dev_flags);
2447 else
2448 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2449 &hdev->dev_flags);
2450
2451 if (changed)
2452 new_settings(hdev, NULL);
2453
2454 for (i = 0; i < key_count; i++) {
2455 struct mgmt_link_key_info *key = &cp->keys[i];
2456
2457 /* Always ignore debug keys and require a new pairing if
2458 * the user wants to use them.
2459 */
2460 if (key->type == HCI_LK_DEBUG_COMBINATION)
2461 continue;
2462
2463 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2464 key->type, key->pin_len, NULL);
2465 }
2466
2467 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2468
2469 hci_dev_unlock(hdev);
2470
2471 return 0;
2472 }
2473
2474 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2475 u8 addr_type, struct sock *skip_sk)
2476 {
2477 struct mgmt_ev_device_unpaired ev;
2478
2479 bacpy(&ev.addr.bdaddr, bdaddr);
2480 ev.addr.type = addr_type;
2481
2482 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2483 skip_sk);
2484 }
2485
2486 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2487 u16 len)
2488 {
2489 struct mgmt_cp_unpair_device *cp = data;
2490 struct mgmt_rp_unpair_device rp;
2491 struct hci_cp_disconnect dc;
2492 struct pending_cmd *cmd;
2493 struct hci_conn *conn;
2494 int err;
2495
2496 memset(&rp, 0, sizeof(rp));
2497 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2498 rp.addr.type = cp->addr.type;
2499
2500 if (!bdaddr_type_is_valid(cp->addr.type))
2501 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2502 MGMT_STATUS_INVALID_PARAMS,
2503 &rp, sizeof(rp));
2504
2505 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2506 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2507 MGMT_STATUS_INVALID_PARAMS,
2508 &rp, sizeof(rp));
2509
2510 hci_dev_lock(hdev);
2511
2512 if (!hdev_is_powered(hdev)) {
2513 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2514 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2515 goto unlock;
2516 }
2517
2518 if (cp->addr.type == BDADDR_BREDR) {
2519 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2520 } else {
2521 u8 addr_type;
2522
2523 if (cp->addr.type == BDADDR_LE_PUBLIC)
2524 addr_type = ADDR_LE_DEV_PUBLIC;
2525 else
2526 addr_type = ADDR_LE_DEV_RANDOM;
2527
2528 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2529
2530 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2531
2532 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2533 }
2534
2535 if (err < 0) {
2536 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2537 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2538 goto unlock;
2539 }
2540
2541 if (cp->disconnect) {
2542 if (cp->addr.type == BDADDR_BREDR)
2543 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2544 &cp->addr.bdaddr);
2545 else
2546 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2547 &cp->addr.bdaddr);
2548 } else {
2549 conn = NULL;
2550 }
2551
2552 if (!conn) {
2553 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2554 &rp, sizeof(rp));
2555 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2556 goto unlock;
2557 }
2558
2559 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2560 sizeof(*cp));
2561 if (!cmd) {
2562 err = -ENOMEM;
2563 goto unlock;
2564 }
2565
2566 dc.handle = cpu_to_le16(conn->handle);
2567 dc.reason = 0x13; /* Remote User Terminated Connection */
2568 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2569 if (err < 0)
2570 mgmt_pending_remove(cmd);
2571
2572 unlock:
2573 hci_dev_unlock(hdev);
2574 return err;
2575 }
2576
/* Handler for the Disconnect mgmt command.
 *
 * Looks up the ACL or LE connection for the given address and sends an
 * HCI Disconnect with reason "remote user terminated". The command is
 * completed asynchronously through the pending entry when the
 * disconnect finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not yet (or no longer) live */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2641
2642 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2643 {
2644 switch (link_type) {
2645 case LE_LINK:
2646 switch (addr_type) {
2647 case ADDR_LE_DEV_PUBLIC:
2648 return BDADDR_LE_PUBLIC;
2649
2650 default:
2651 /* Fallback to LE Random address type */
2652 return BDADDR_LE_RANDOM;
2653 }
2654
2655 default:
2656 /* Fallback to BR/EDR type */
2657 return BDADDR_BREDR;
2658 }
2659 }
2660
2661 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2662 u16 data_len)
2663 {
2664 struct mgmt_rp_get_connections *rp;
2665 struct hci_conn *c;
2666 size_t rp_len;
2667 int err;
2668 u16 i;
2669
2670 BT_DBG("");
2671
2672 hci_dev_lock(hdev);
2673
2674 if (!hdev_is_powered(hdev)) {
2675 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2676 MGMT_STATUS_NOT_POWERED);
2677 goto unlock;
2678 }
2679
2680 i = 0;
2681 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2682 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2683 i++;
2684 }
2685
2686 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2687 rp = kmalloc(rp_len, GFP_KERNEL);
2688 if (!rp) {
2689 err = -ENOMEM;
2690 goto unlock;
2691 }
2692
2693 i = 0;
2694 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2695 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2696 continue;
2697 bacpy(&rp->addr[i].bdaddr, &c->dst);
2698 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2699 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2700 continue;
2701 i++;
2702 }
2703
2704 rp->conn_count = cpu_to_le16(i);
2705
2706 /* Recalculate length in case of filtered SCO connections, etc */
2707 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2708
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2710 rp_len);
2711
2712 kfree(rp);
2713
2714 unlock:
2715 hci_dev_unlock(hdev);
2716 return err;
2717 }
2718
2719 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2720 struct mgmt_cp_pin_code_neg_reply *cp)
2721 {
2722 struct pending_cmd *cmd;
2723 int err;
2724
2725 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2726 sizeof(*cp));
2727 if (!cmd)
2728 return -ENOMEM;
2729
2730 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2731 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2732 if (err < 0)
2733 mgmt_pending_remove(cmd);
2734
2735 return err;
2736 }
2737
/* Handler for the PIN Code Reply mgmt command.
 *
 * Forwards a user-supplied PIN to the controller for the matching ACL
 * connection. If high security is pending and the PIN is not a full
 * 16-byte one, the reply is converted into a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only exists on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; anything shorter is
	 * rejected with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2797
2798 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2799 u16 len)
2800 {
2801 struct mgmt_cp_set_io_capability *cp = data;
2802
2803 BT_DBG("");
2804
2805 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2806 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2807 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2808
2809 hci_dev_lock(hdev);
2810
2811 hdev->io_capability = cp->io_capability;
2812
2813 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2814 hdev->io_capability);
2815
2816 hci_dev_unlock(hdev);
2817
2818 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2819 0);
2820 }
2821
2822 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2823 {
2824 struct hci_dev *hdev = conn->hdev;
2825 struct pending_cmd *cmd;
2826
2827 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2828 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2829 continue;
2830
2831 if (cmd->user_data != conn)
2832 continue;
2833
2834 return cmd;
2835 }
2836
2837 return NULL;
2838 }
2839
/* Finish a pending Pair Device command with the given mgmt status.
 *
 * Sends the command reply, detaches all connection callbacks so no
 * further events reach this (about to be freed) pending entry, drops
 * the connection reference taken when pairing started, and removes the
 * pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2860
2861 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2862 {
2863 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2864 struct pending_cmd *cmd;
2865
2866 cmd = find_pairing(conn);
2867 if (cmd)
2868 pairing_complete(cmd, status);
2869 }
2870
2871 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2872 {
2873 struct pending_cmd *cmd;
2874
2875 BT_DBG("status %u", status);
2876
2877 cmd = find_pairing(conn);
2878 if (!cmd)
2879 BT_DBG("Unable to find a pending command");
2880 else
2881 pairing_complete(cmd, mgmt_status(status));
2882 }
2883
2884 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2885 {
2886 struct pending_cmd *cmd;
2887
2888 BT_DBG("status %u", status);
2889
2890 if (!status)
2891 return;
2892
2893 cmd = find_pairing(conn);
2894 if (!cmd)
2895 BT_DBG("Unable to find a pending command");
2896 else
2897 pairing_complete(cmd, mgmt_status(status));
2898 }
2899
/* Handler for the Pair Device mgmt command.
 *
 * Creates an ACL or LE connection to the target address and attaches
 * pairing callbacks. The command completes asynchronously through
 * pairing_complete() once the security procedure finishes (or fails).
 * The connection reference taken here is dropped in pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means someone else (e.g. another
	 * pairing attempt) already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3017
/* Handler for the Cancel Pair Device mgmt command.
 *
 * Aborts the in-flight Pair Device command for the matching address by
 * completing it with MGMT_STATUS_CANCELLED, then replies to @sk.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the device the pairing was started for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3059
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm/passkey replies and their negative variants).
 *
 * For LE connections the response is routed to the SMP layer and
 * completed synchronously; for BR/EDR it is forwarded as the HCI
 * command @hci_op and completed asynchronously via a pending entry.
 * @passkey is only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE responses go through SMP, not HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3127
3128 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3129 void *data, u16 len)
3130 {
3131 struct mgmt_cp_pin_code_neg_reply *cp = data;
3132
3133 BT_DBG("");
3134
3135 return user_pairing_resp(sk, hdev, &cp->addr,
3136 MGMT_OP_PIN_CODE_NEG_REPLY,
3137 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3138 }
3139
3140 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3141 u16 len)
3142 {
3143 struct mgmt_cp_user_confirm_reply *cp = data;
3144
3145 BT_DBG("");
3146
3147 if (len != sizeof(*cp))
3148 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3149 MGMT_STATUS_INVALID_PARAMS);
3150
3151 return user_pairing_resp(sk, hdev, &cp->addr,
3152 MGMT_OP_USER_CONFIRM_REPLY,
3153 HCI_OP_USER_CONFIRM_REPLY, 0);
3154 }
3155
3156 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3157 void *data, u16 len)
3158 {
3159 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3160
3161 BT_DBG("");
3162
3163 return user_pairing_resp(sk, hdev, &cp->addr,
3164 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3165 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3166 }
3167
3168 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3169 u16 len)
3170 {
3171 struct mgmt_cp_user_passkey_reply *cp = data;
3172
3173 BT_DBG("");
3174
3175 return user_pairing_resp(sk, hdev, &cp->addr,
3176 MGMT_OP_USER_PASSKEY_REPLY,
3177 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3178 }
3179
3180 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3181 void *data, u16 len)
3182 {
3183 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3184
3185 BT_DBG("");
3186
3187 return user_pairing_resp(sk, hdev, &cp->addr,
3188 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3189 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3190 }
3191
3192 static void update_name(struct hci_request *req)
3193 {
3194 struct hci_dev *hdev = req->hdev;
3195 struct hci_cp_write_local_name cp;
3196
3197 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3198
3199 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3200 }
3201
/* hci_request completion callback for Set Local Name: reports the HCI
 * outcome back on the pending mgmt command, echoing the requested name
 * on success.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending entry may already be gone (e.g. power off) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3229
/* Handler for the Set Local Name mgmt command.
 *
 * Stores the new complete and short names. When powered, queues HCI
 * updates for the local name, EIR data and (for LE) the scan response
 * data; completion is signalled via set_name_complete(). When powered
 * off only the stored values change and the change event is emitted
 * directly.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is host-side only; no HCI update needed for it */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3298
/* Handler for the Read Local OOB Data mgmt command.
 *
 * Requests the controller's out-of-band pairing data; when Secure
 * Connections is enabled the extended variant is used so both P-192 and
 * P-256 values are returned. Completion happens asynchronously via the
 * pending entry.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists with Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3346
/* Handler for the Add Remote OOB Data mgmt command.
 *
 * Accepts two payload sizes: the legacy form carrying the P-192
 * hash/randomizer pair, and the extended form carrying both P-192 and
 * P-256 values. Any other length is rejected as invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3394
3395 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3396 void *data, u16 len)
3397 {
3398 struct mgmt_cp_remove_remote_oob_data *cp = data;
3399 u8 status;
3400 int err;
3401
3402 BT_DBG("%s", hdev->name);
3403
3404 hci_dev_lock(hdev);
3405
3406 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3407 if (err < 0)
3408 status = MGMT_STATUS_INVALID_PARAMS;
3409 else
3410 status = MGMT_STATUS_SUCCESS;
3411
3412 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3413 status, &cp->addr, sizeof(cp->addr));
3414
3415 hci_dev_unlock(hdev);
3416 return err;
3417 }
3418
3419 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3420 {
3421 struct pending_cmd *cmd;
3422 u8 type;
3423 int err;
3424
3425 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3426
3427 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3428 if (!cmd)
3429 return -ENOENT;
3430
3431 type = hdev->discovery.type;
3432
3433 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3434 &type, sizeof(type));
3435 mgmt_pending_remove(cmd);
3436
3437 return err;
3438 }
3439
/* hci_request completion callback for Start Discovery.
 *
 * On success, moves the discovery state machine to FINDING and, for
 * LE-involved discovery types, schedules the delayed work that stops
 * LE scanning after the discovery timeout. Pure BR/EDR inquiry needs
 * no timer (the controller ends the inquiry itself).
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout == 0: BR/EDR-only (or invalid type), no LE scan to stop */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3478
/* Handler for the Start Discovery mgmt command.
 *
 * Builds and runs the HCI request matching the requested discovery
 * type: a BR/EDR inquiry, an active LE scan, or both (interleaved).
 * On success the state machine moves to STARTING and the command is
 * completed asynchronously via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry conflicts with discovery */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is rejected */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3627
3628 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3629 {
3630 struct pending_cmd *cmd;
3631 int err;
3632
3633 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3634 if (!cmd)
3635 return -ENOENT;
3636
3637 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3638 &hdev->discovery.type, sizeof(hdev->discovery.type));
3639 mgmt_pending_remove(cmd);
3640
3641 return err;
3642 }
3643
3644 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3645 {
3646 BT_DBG("status %d", status);
3647
3648 hci_dev_lock(hdev);
3649
3650 if (status) {
3651 mgmt_stop_discovery_failed(hdev, status);
3652 goto unlock;
3653 }
3654
3655 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3656
3657 unlock:
3658 hci_dev_unlock(hdev);
3659 }
3660
/* Handler for the Stop Discovery mgmt command. Verifies that a
 * discovery of the requested type is actually running, then builds and
 * runs the HCI request that stops it. The reply to user space is sent
 * either immediately (error paths and the nothing-to-send case) or
 * later from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the ongoing discovery's type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		/* Commands are in flight; the completion callback will
		 * answer the pending command.
		 */
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3716
3717 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3718 u16 len)
3719 {
3720 struct mgmt_cp_confirm_name *cp = data;
3721 struct inquiry_entry *e;
3722 int err;
3723
3724 BT_DBG("%s", hdev->name);
3725
3726 hci_dev_lock(hdev);
3727
3728 if (!hci_discovery_active(hdev)) {
3729 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3730 MGMT_STATUS_FAILED, &cp->addr,
3731 sizeof(cp->addr));
3732 goto failed;
3733 }
3734
3735 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3736 if (!e) {
3737 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3738 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3739 sizeof(cp->addr));
3740 goto failed;
3741 }
3742
3743 if (cp->name_known) {
3744 e->name_state = NAME_KNOWN;
3745 list_del(&e->list);
3746 } else {
3747 e->name_state = NAME_NEEDED;
3748 hci_inquiry_cache_update_resolve(hdev, e);
3749 }
3750
3751 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3752 sizeof(cp->addr));
3753
3754 failed:
3755 hci_dev_unlock(hdev);
3756 return err;
3757 }
3758
3759 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3760 u16 len)
3761 {
3762 struct mgmt_cp_block_device *cp = data;
3763 u8 status;
3764 int err;
3765
3766 BT_DBG("%s", hdev->name);
3767
3768 if (!bdaddr_type_is_valid(cp->addr.type))
3769 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3770 MGMT_STATUS_INVALID_PARAMS,
3771 &cp->addr, sizeof(cp->addr));
3772
3773 hci_dev_lock(hdev);
3774
3775 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3776 if (err < 0)
3777 status = MGMT_STATUS_FAILED;
3778 else
3779 status = MGMT_STATUS_SUCCESS;
3780
3781 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3782 &cp->addr, sizeof(cp->addr));
3783
3784 hci_dev_unlock(hdev);
3785
3786 return err;
3787 }
3788
3789 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3790 u16 len)
3791 {
3792 struct mgmt_cp_unblock_device *cp = data;
3793 u8 status;
3794 int err;
3795
3796 BT_DBG("%s", hdev->name);
3797
3798 if (!bdaddr_type_is_valid(cp->addr.type))
3799 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3800 MGMT_STATUS_INVALID_PARAMS,
3801 &cp->addr, sizeof(cp->addr));
3802
3803 hci_dev_lock(hdev);
3804
3805 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3806 if (err < 0)
3807 status = MGMT_STATUS_INVALID_PARAMS;
3808 else
3809 status = MGMT_STATUS_SUCCESS;
3810
3811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3812 &cp->addr, sizeof(cp->addr));
3813
3814 hci_dev_unlock(hdev);
3815
3816 return err;
3817 }
3818
3819 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3820 u16 len)
3821 {
3822 struct mgmt_cp_set_device_id *cp = data;
3823 struct hci_request req;
3824 int err;
3825 __u16 source;
3826
3827 BT_DBG("%s", hdev->name);
3828
3829 source = __le16_to_cpu(cp->source);
3830
3831 if (source > 0x0002)
3832 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3833 MGMT_STATUS_INVALID_PARAMS);
3834
3835 hci_dev_lock(hdev);
3836
3837 hdev->devid_source = source;
3838 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3839 hdev->devid_product = __le16_to_cpu(cp->product);
3840 hdev->devid_version = __le16_to_cpu(cp->version);
3841
3842 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3843
3844 hci_req_init(&req, hdev);
3845 update_eir(&req);
3846 hci_req_run(&req, NULL);
3847
3848 hci_dev_unlock(hdev);
3849
3850 return err;
3851 }
3852
3853 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3854 {
3855 struct cmd_lookup match = { NULL, hdev };
3856
3857 if (status) {
3858 u8 mgmt_err = mgmt_status(status);
3859
3860 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3861 cmd_status_rsp, &mgmt_err);
3862 return;
3863 }
3864
3865 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3866 &match);
3867
3868 new_settings(hdev, match.sk);
3869
3870 if (match.sk)
3871 sock_put(match.sk);
3872 }
3873
/* Handler for the Set Advertising mgmt command. Enables or disables LE
 * advertising. When no HCI communication is needed (powered off, value
 * unchanged, or an LE connection exists) the flag is toggled and the
 * reply sent directly; otherwise an HCI request is run and answered in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if the flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while a conflicting operation is still pending */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3951
3952 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3953 void *data, u16 len)
3954 {
3955 struct mgmt_cp_set_static_address *cp = data;
3956 int err;
3957
3958 BT_DBG("%s", hdev->name);
3959
3960 if (!lmp_le_capable(hdev))
3961 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3962 MGMT_STATUS_NOT_SUPPORTED);
3963
3964 if (hdev_is_powered(hdev))
3965 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3966 MGMT_STATUS_REJECTED);
3967
3968 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3969 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3970 return cmd_status(sk, hdev->id,
3971 MGMT_OP_SET_STATIC_ADDRESS,
3972 MGMT_STATUS_INVALID_PARAMS);
3973
3974 /* Two most significant bits shall be set */
3975 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3976 return cmd_status(sk, hdev->id,
3977 MGMT_OP_SET_STATIC_ADDRESS,
3978 MGMT_STATUS_INVALID_PARAMS);
3979 }
3980
3981 hci_dev_lock(hdev);
3982
3983 bacpy(&hdev->static_addr, &cp->bdaddr);
3984
3985 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3986
3987 hci_dev_unlock(hdev);
3988
3989 return err;
3990 }
3991
3992 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3993 void *data, u16 len)
3994 {
3995 struct mgmt_cp_set_scan_params *cp = data;
3996 __u16 interval, window;
3997 int err;
3998
3999 BT_DBG("%s", hdev->name);
4000
4001 if (!lmp_le_capable(hdev))
4002 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4003 MGMT_STATUS_NOT_SUPPORTED);
4004
4005 interval = __le16_to_cpu(cp->interval);
4006
4007 if (interval < 0x0004 || interval > 0x4000)
4008 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4009 MGMT_STATUS_INVALID_PARAMS);
4010
4011 window = __le16_to_cpu(cp->window);
4012
4013 if (window < 0x0004 || window > 0x4000)
4014 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4015 MGMT_STATUS_INVALID_PARAMS);
4016
4017 if (window > interval)
4018 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4019 MGMT_STATUS_INVALID_PARAMS);
4020
4021 hci_dev_lock(hdev);
4022
4023 hdev->le_scan_interval = interval;
4024 hdev->le_scan_window = window;
4025
4026 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4027
4028 /* If background scan is running, restart it so new parameters are
4029 * loaded.
4030 */
4031 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4032 hdev->discovery.state == DISCOVERY_STOPPED) {
4033 struct hci_request req;
4034
4035 hci_req_init(&req, hdev);
4036
4037 hci_req_add_le_scan_disable(&req);
4038 hci_req_add_le_passive_scan(&req);
4039
4040 hci_req_run(&req, NULL);
4041 }
4042
4043 hci_dev_unlock(hdev);
4044
4045 return err;
4046 }
4047
4048 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4049 {
4050 struct pending_cmd *cmd;
4051
4052 BT_DBG("status 0x%02x", status);
4053
4054 hci_dev_lock(hdev);
4055
4056 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4057 if (!cmd)
4058 goto unlock;
4059
4060 if (status) {
4061 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4062 mgmt_status(status));
4063 } else {
4064 struct mgmt_mode *cp = cmd->param;
4065
4066 if (cp->val)
4067 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4068 else
4069 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4070
4071 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4072 new_settings(hdev, cmd->sk);
4073 }
4074
4075 mgmt_pending_remove(cmd);
4076
4077 unlock:
4078 hci_dev_unlock(hdev);
4079 }
4080
/* Handler for the Set Fast Connectable mgmt command. Changes the page
 * scan parameters of a powered BR/EDR controller; the actual flag
 * update and reply happen in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Needs BR/EDR enabled and a controller of at least version 1.2 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Only meaningful while the device is connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested; just reply with current settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4145
4146 static void set_bredr_scan(struct hci_request *req)
4147 {
4148 struct hci_dev *hdev = req->hdev;
4149 u8 scan = 0;
4150
4151 /* Ensure that fast connectable is disabled. This function will
4152 * not do anything if the page scan parameters are already what
4153 * they should be.
4154 */
4155 write_fast_connectable(req, false);
4156
4157 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4158 scan |= SCAN_PAGE;
4159 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4160 scan |= SCAN_INQUIRY;
4161
4162 if (scan)
4163 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4164 }
4165
4166 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4167 {
4168 struct pending_cmd *cmd;
4169
4170 BT_DBG("status 0x%02x", status);
4171
4172 hci_dev_lock(hdev);
4173
4174 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4175 if (!cmd)
4176 goto unlock;
4177
4178 if (status) {
4179 u8 mgmt_err = mgmt_status(status);
4180
4181 /* We need to restore the flag if related HCI commands
4182 * failed.
4183 */
4184 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4185
4186 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4187 } else {
4188 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4189 new_settings(hdev, cmd->sk);
4190 }
4191
4192 mgmt_pending_remove(cmd);
4193
4194 unlock:
4195 hci_dev_unlock(hdev);
4196 }
4197
/* Handler for the Set BR/EDR mgmt command. Toggles BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling is only permitted
 * while powered off; enabling while powered runs an HCI request
 * completed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested; just reply with current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* When disabling BR/EDR also clear the settings that
		 * only make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4287
/* Handler for the Set Secure Connections mgmt command. Value 0x01
 * enables SC, 0x02 enables SC-only mode, 0x00 disables both. When the
 * controller is powered off only the flags are toggled; otherwise a
 * Write Secure Connections Host Support HCI command is sent.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Needs controller SC support unless it is being forced for
	 * debugging purposes.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* Powered off: only toggle the flags, no HCI traffic */
		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag; just reply with current settings */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is tracked host-side, so update it right away */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4375
4376 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4377 void *data, u16 len)
4378 {
4379 struct mgmt_mode *cp = data;
4380 bool changed, use_changed;
4381 int err;
4382
4383 BT_DBG("request for %s", hdev->name);
4384
4385 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4386 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4387 MGMT_STATUS_INVALID_PARAMS);
4388
4389 hci_dev_lock(hdev);
4390
4391 if (cp->val)
4392 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4393 &hdev->dev_flags);
4394 else
4395 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4396 &hdev->dev_flags);
4397
4398 if (cp->val == 0x02)
4399 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4400 &hdev->dev_flags);
4401 else
4402 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4403 &hdev->dev_flags);
4404
4405 if (hdev_is_powered(hdev) && use_changed &&
4406 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4407 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4408 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4409 sizeof(mode), &mode);
4410 }
4411
4412 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4413 if (err < 0)
4414 goto unlock;
4415
4416 if (changed)
4417 err = new_settings(hdev, sk);
4418
4419 unlock:
4420 hci_dev_unlock(hdev);
4421 return err;
4422 }
4423
4424 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4425 u16 len)
4426 {
4427 struct mgmt_cp_set_privacy *cp = cp_data;
4428 bool changed;
4429 int err;
4430
4431 BT_DBG("request for %s", hdev->name);
4432
4433 if (!lmp_le_capable(hdev))
4434 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4435 MGMT_STATUS_NOT_SUPPORTED);
4436
4437 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4438 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4439 MGMT_STATUS_INVALID_PARAMS);
4440
4441 if (hdev_is_powered(hdev))
4442 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4443 MGMT_STATUS_REJECTED);
4444
4445 hci_dev_lock(hdev);
4446
4447 /* If user space supports this command it is also expected to
4448 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4449 */
4450 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4451
4452 if (cp->privacy) {
4453 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4454 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4455 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4456 } else {
4457 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4458 memset(hdev->irk, 0, sizeof(hdev->irk));
4459 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4460 }
4461
4462 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4463 if (err < 0)
4464 goto unlock;
4465
4466 if (changed)
4467 err = new_settings(hdev, sk);
4468
4469 unlock:
4470 hci_dev_unlock(hdev);
4471 return err;
4472 }
4473
4474 static bool irk_is_valid(struct mgmt_irk_info *irk)
4475 {
4476 switch (irk->addr.type) {
4477 case BDADDR_LE_PUBLIC:
4478 return true;
4479
4480 case BDADDR_LE_RANDOM:
4481 /* Two most significant bits shall be set */
4482 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4483 return false;
4484 return true;
4485 }
4486
4487 return false;
4488 }
4489
4490 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4491 u16 len)
4492 {
4493 struct mgmt_cp_load_irks *cp = cp_data;
4494 u16 irk_count, expected_len;
4495 int i, err;
4496
4497 BT_DBG("request for %s", hdev->name);
4498
4499 if (!lmp_le_capable(hdev))
4500 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4501 MGMT_STATUS_NOT_SUPPORTED);
4502
4503 irk_count = __le16_to_cpu(cp->irk_count);
4504
4505 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4506 if (expected_len != len) {
4507 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4508 expected_len, len);
4509 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4510 MGMT_STATUS_INVALID_PARAMS);
4511 }
4512
4513 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4514
4515 for (i = 0; i < irk_count; i++) {
4516 struct mgmt_irk_info *key = &cp->irks[i];
4517
4518 if (!irk_is_valid(key))
4519 return cmd_status(sk, hdev->id,
4520 MGMT_OP_LOAD_IRKS,
4521 MGMT_STATUS_INVALID_PARAMS);
4522 }
4523
4524 hci_dev_lock(hdev);
4525
4526 hci_smp_irks_clear(hdev);
4527
4528 for (i = 0; i < irk_count; i++) {
4529 struct mgmt_irk_info *irk = &cp->irks[i];
4530 u8 addr_type;
4531
4532 if (irk->addr.type == BDADDR_LE_PUBLIC)
4533 addr_type = ADDR_LE_DEV_PUBLIC;
4534 else
4535 addr_type = ADDR_LE_DEV_RANDOM;
4536
4537 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4538 BDADDR_ANY);
4539 }
4540
4541 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4542
4543 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4544
4545 hci_dev_unlock(hdev);
4546
4547 return err;
4548 }
4549
4550 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4551 {
4552 if (key->master != 0x00 && key->master != 0x01)
4553 return false;
4554
4555 switch (key->addr.type) {
4556 case BDADDR_LE_PUBLIC:
4557 return true;
4558
4559 case BDADDR_LE_RANDOM:
4560 /* Two most significant bits shall be set */
4561 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4562 return false;
4563 return true;
4564 }
4565
4566 return false;
4567 }
4568
4569 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4570 void *cp_data, u16 len)
4571 {
4572 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4573 u16 key_count, expected_len;
4574 int i, err;
4575
4576 BT_DBG("request for %s", hdev->name);
4577
4578 if (!lmp_le_capable(hdev))
4579 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4580 MGMT_STATUS_NOT_SUPPORTED);
4581
4582 key_count = __le16_to_cpu(cp->key_count);
4583
4584 expected_len = sizeof(*cp) + key_count *
4585 sizeof(struct mgmt_ltk_info);
4586 if (expected_len != len) {
4587 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4588 expected_len, len);
4589 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4590 MGMT_STATUS_INVALID_PARAMS);
4591 }
4592
4593 BT_DBG("%s key_count %u", hdev->name, key_count);
4594
4595 for (i = 0; i < key_count; i++) {
4596 struct mgmt_ltk_info *key = &cp->keys[i];
4597
4598 if (!ltk_is_valid(key))
4599 return cmd_status(sk, hdev->id,
4600 MGMT_OP_LOAD_LONG_TERM_KEYS,
4601 MGMT_STATUS_INVALID_PARAMS);
4602 }
4603
4604 hci_dev_lock(hdev);
4605
4606 hci_smp_ltks_clear(hdev);
4607
4608 for (i = 0; i < key_count; i++) {
4609 struct mgmt_ltk_info *key = &cp->keys[i];
4610 u8 type, addr_type, authenticated;
4611
4612 if (key->addr.type == BDADDR_LE_PUBLIC)
4613 addr_type = ADDR_LE_DEV_PUBLIC;
4614 else
4615 addr_type = ADDR_LE_DEV_RANDOM;
4616
4617 if (key->master)
4618 type = SMP_LTK;
4619 else
4620 type = SMP_LTK_SLAVE;
4621
4622 switch (key->type) {
4623 case MGMT_LTK_UNAUTHENTICATED:
4624 authenticated = 0x00;
4625 break;
4626 case MGMT_LTK_AUTHENTICATED:
4627 authenticated = 0x01;
4628 break;
4629 default:
4630 continue;
4631 }
4632
4633 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4634 authenticated, key->val, key->enc_size, key->ediv,
4635 key->rand);
4636 }
4637
4638 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4639 NULL, 0);
4640
4641 hci_dev_unlock(hdev);
4642
4643 return err;
4644 }
4645
/* Context handed to get_conn_info_complete() when iterating over the
 * pending Get Connection Information commands.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the refresh was for */
	bool valid_tx_power;		/* TX power values in conn are usable */
	u8 mgmt_status;			/* status to report to user space */
};
4651
/* mgmt_pending_foreach() callback run from conn_info_refresh_complete().
 * Replies to the pending Get Connection Information command whose
 * hci_conn matches the refreshed connection, then drops the connection
 * reference taken in get_conn_info().
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer commands referring to the refreshed connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power values are only reported when the refresh
		 * request deemed them valid.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4687
/* HCI request completion callback for the RSSI / TX power refresh
 * issued by get_conn_info(). Works out which connection the request
 * was for and answers the matching pending mgmt command(s).
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4745
/* MGMT_OP_GET_CONN_INFO handler: report RSSI and TX power for an
 * active connection.  Recent cached values are returned directly;
 * otherwise an HCI request is issued to refresh them and the mgmt
 * reply is deferred to the request's completion callback.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply, including errors. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the refresh completes.
		 * NOTE(review): the matching hci_conn_drop() is presumably
		 * done by the completion path (get_conn_info_complete) -
		 * confirm, it is outside this view.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4857
/* Request-complete callback for the Read Clock request issued by
 * get_clock_info().  Replies to the matching pending mgmt command with
 * the local (and, if a connection was involved, piconet) clock.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Recover the parameters of the Read Clock command that just
	 * completed; bail out if none are available.
	 */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was requested (get_clock_info() sets 0x01).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered with conn as user data,
	 * so NULL matches the local-clock-only case.
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and the mapped status. */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken by get_clock_info(). */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4910
4911 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
4912 u16 len)
4913 {
4914 struct mgmt_cp_get_clock_info *cp = data;
4915 struct mgmt_rp_get_clock_info rp;
4916 struct hci_cp_read_clock hci_cp;
4917 struct pending_cmd *cmd;
4918 struct hci_request req;
4919 struct hci_conn *conn;
4920 int err;
4921
4922 BT_DBG("%s", hdev->name);
4923
4924 memset(&rp, 0, sizeof(rp));
4925 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4926 rp.addr.type = cp->addr.type;
4927
4928 if (cp->addr.type != BDADDR_BREDR)
4929 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4930 MGMT_STATUS_INVALID_PARAMS,
4931 &rp, sizeof(rp));
4932
4933 hci_dev_lock(hdev);
4934
4935 if (!hdev_is_powered(hdev)) {
4936 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4937 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4938 goto unlock;
4939 }
4940
4941 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4942 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4943 &cp->addr.bdaddr);
4944 if (!conn || conn->state != BT_CONNECTED) {
4945 err = cmd_complete(sk, hdev->id,
4946 MGMT_OP_GET_CLOCK_INFO,
4947 MGMT_STATUS_NOT_CONNECTED,
4948 &rp, sizeof(rp));
4949 goto unlock;
4950 }
4951 } else {
4952 conn = NULL;
4953 }
4954
4955 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
4956 if (!cmd) {
4957 err = -ENOMEM;
4958 goto unlock;
4959 }
4960
4961 hci_req_init(&req, hdev);
4962
4963 memset(&hci_cp, 0, sizeof(hci_cp));
4964 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4965
4966 if (conn) {
4967 hci_conn_hold(conn);
4968 cmd->user_data = conn;
4969
4970 hci_cp.handle = cpu_to_le16(conn->handle);
4971 hci_cp.which = 0x01; /* Piconet clock */
4972 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4973 }
4974
4975 err = hci_req_run(&req, get_clock_info_complete);
4976 if (err < 0)
4977 mgmt_pending_remove(cmd);
4978
4979 unlock:
4980 hci_dev_unlock(hdev);
4981 return err;
4982 }
4983
4984 static void device_added(struct sock *sk, struct hci_dev *hdev,
4985 bdaddr_t *bdaddr, u8 type, u8 action)
4986 {
4987 struct mgmt_ev_device_added ev;
4988
4989 bacpy(&ev.addr.bdaddr, bdaddr);
4990 ev.addr.type = type;
4991 ev.action = action;
4992
4993 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
4994 }
4995
4996 static int add_device(struct sock *sk, struct hci_dev *hdev,
4997 void *data, u16 len)
4998 {
4999 struct mgmt_cp_add_device *cp = data;
5000 u8 auto_conn, addr_type;
5001 int err;
5002
5003 BT_DBG("%s", hdev->name);
5004
5005 if (!bdaddr_type_is_le(cp->addr.type) ||
5006 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5007 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5008 MGMT_STATUS_INVALID_PARAMS,
5009 &cp->addr, sizeof(cp->addr));
5010
5011 if (cp->action != 0x00 && cp->action != 0x01)
5012 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5013 MGMT_STATUS_INVALID_PARAMS,
5014 &cp->addr, sizeof(cp->addr));
5015
5016 hci_dev_lock(hdev);
5017
5018 if (cp->addr.type == BDADDR_LE_PUBLIC)
5019 addr_type = ADDR_LE_DEV_PUBLIC;
5020 else
5021 addr_type = ADDR_LE_DEV_RANDOM;
5022
5023 if (cp->action)
5024 auto_conn = HCI_AUTO_CONN_ALWAYS;
5025 else
5026 auto_conn = HCI_AUTO_CONN_DISABLED;
5027
5028 /* If the connection parameters don't exist for this device,
5029 * they will be created and configured with defaults.
5030 */
5031 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5032 auto_conn) < 0) {
5033 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5034 MGMT_STATUS_FAILED,
5035 &cp->addr, sizeof(cp->addr));
5036 goto unlock;
5037 }
5038
5039 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5040
5041 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5042 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5043
5044 unlock:
5045 hci_dev_unlock(hdev);
5046 return err;
5047 }
5048
5049 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5050 bdaddr_t *bdaddr, u8 type)
5051 {
5052 struct mgmt_ev_device_removed ev;
5053
5054 bacpy(&ev.addr.bdaddr, bdaddr);
5055 ev.addr.type = type;
5056
5057 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5058 }
5059
/* MGMT_OP_REMOVE_DEVICE handler: remove one LE device from the
 * connection parameters list, or clear the whole list when BDADDR_ANY
 * (with address type 0) is given.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		u8 addr_type;

		/* Only LE addresses can be on the parameters list. */
		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal requires address type 0. */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5106
/* Dispatch table indexed by mgmt opcode (see mgmt_control()).
 * var_len commands accept payloads larger than data_len (the fixed
 * part); fixed-size commands must match data_len exactly.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
};
5167
/* Entry point for mgmt commands received on an HCI control socket.
 * Validates the header, resolves the target controller index and
 * dispatches to the handler table.  Returns the number of bytes
 * consumed on success or a negative errno / handler error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, bound to a user channel
		 * or flagged as raw-only are not visible over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) ||
		    test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry
	 * a controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the whole message as consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5261
5262 void mgmt_index_added(struct hci_dev *hdev)
5263 {
5264 if (hdev->dev_type != HCI_BREDR)
5265 return;
5266
5267 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5268 }
5269
5270 void mgmt_index_removed(struct hci_dev *hdev)
5271 {
5272 u8 status = MGMT_STATUS_INVALID_INDEX;
5273
5274 if (hdev->dev_type != HCI_BREDR)
5275 return;
5276
5277 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5278
5279 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5280 }
5281
/* This function requires the caller holds hdev->lock */
static void restart_le_auto_conns(struct hci_dev *hdev)
{
	struct hci_conn_params *p;
	bool added = false;

	/* Re-register every always-auto-connect peer as a pending LE
	 * connection after the controller was powered back on.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
			added = true;
		}
	}

	/* Calling hci_pend_le_conn_add will actually already trigger
	 * background scanning when needed. So no need to trigger it
	 * just another time.
	 *
	 * This check is here to avoid an unneeded restart of the
	 * passive scanning. Since this is during the controller
	 * power up phase the duplicate filtering is not an issue.
	 */
	if (added)
		return;

	hci_update_background_scan(hdev);
}
5308
/* Request-complete callback for powered_update_hci(): finishes the
 * power-on sequence by restoring LE auto-connections, answering
 * pending Set Powered commands and broadcasting the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* match.sk presumably holds a reference taken by settings_rsp
	 * (outside this view - confirm); release it outside the lock.
	 */
	if (match.sk)
		sock_put(match.sk);
}
5328
/* Queue the HCI commands needed to bring a freshly powered controller
 * in sync with the current mgmt settings (SSP, LE host support,
 * advertising data, link security, scan mode, class, name and EIR).
 * Returns the result of hci_req_run(); powered_complete() runs when
 * the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller when mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the controller
	 * only if it differs from the current HCI state.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5388
/* Called when the controller power state changed.  Synchronizes mgmt
 * state with the new power state and notifies userspace via New
 * Settings.  Returns 0 or the result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If powered_update_hci() successfully queued HCI
		 * commands, the rest of the power-on handling continues
		 * in powered_complete() - nothing more to do here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: answer Set Powered commands, then fail every
	 * other pending command (opcode 0 matches all) as Not Powered.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Announce the class-of-device reset if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5423
5424 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5425 {
5426 struct pending_cmd *cmd;
5427 u8 status;
5428
5429 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5430 if (!cmd)
5431 return;
5432
5433 if (err == -ERFKILL)
5434 status = MGMT_STATUS_RFKILLED;
5435 else
5436 status = MGMT_STATUS_FAILED;
5437
5438 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5439
5440 mgmt_pending_remove(cmd);
5441 }
5442
/* Discoverable timeout expired: turn discoverable mode back off and
 * notify userspace.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	/* Drop inquiry scan (leaving page scan only) and refresh class
	 * and advertising data so the discoverable bits disappear.
	 */
	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5473
/* Track discoverable mode changes reported by the HCI layer and emit
 * New Settings when the flag actually flipped.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot outlive discoverable. */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5510
5511 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5512 {
5513 bool changed;
5514
5515 /* Nothing needed here if there's a pending command since that
5516 * commands request completion callback takes care of everything
5517 * necessary.
5518 */
5519 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5520 return;
5521
5522 /* Powering off may clear the scan mode - don't let that interfere */
5523 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5524 return;
5525
5526 if (connectable)
5527 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5528 else
5529 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5530
5531 if (changed)
5532 new_settings(hdev, NULL);
5533 }
5534
5535 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5536 {
5537 /* Powering off may stop advertising - don't let that interfere */
5538 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5539 return;
5540
5541 if (advertising)
5542 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5543 else
5544 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5545 }
5546
5547 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5548 {
5549 u8 mgmt_err = mgmt_status(status);
5550
5551 if (scan & SCAN_PAGE)
5552 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5553 cmd_status_rsp, &mgmt_err);
5554
5555 if (scan & SCAN_INQUIRY)
5556 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5557 cmd_status_rsp, &mgmt_err);
5558 }
5559
5560 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5561 bool persistent)
5562 {
5563 struct mgmt_ev_new_link_key ev;
5564
5565 memset(&ev, 0, sizeof(ev));
5566
5567 ev.store_hint = persistent;
5568 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5569 ev.key.addr.type = BDADDR_BREDR;
5570 ev.key.type = key->type;
5571 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5572 ev.key.pin_len = key->pin_len;
5573
5574 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5575 }
5576
5577 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5578 {
5579 if (ltk->authenticated)
5580 return MGMT_LTK_AUTHENTICATED;
5581
5582 return MGMT_LTK_UNAUTHENTICATED;
5583 }
5584
/* Forward a newly distributed Long Term Key to userspace together
 * with a hint whether it should be stored persistently.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to the responder variant - presumably
	 * the master-side key, confirm against smp.h) sets the master
	 * flag in the event.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5622
5623 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5624 {
5625 struct mgmt_ev_new_irk ev;
5626
5627 memset(&ev, 0, sizeof(ev));
5628
5629 /* For identity resolving keys from devices that are already
5630 * using a public address or static random address, do not
5631 * ask for storing this key. The identity resolving key really
5632 * is only mandatory for devices using resovlable random
5633 * addresses.
5634 *
5635 * Storing all identity resolving keys has the downside that
5636 * they will be also loaded on next boot of they system. More
5637 * identity resolving keys, means more time during scanning is
5638 * needed to actually resolve these addresses.
5639 */
5640 if (bacmp(&irk->rpa, BDADDR_ANY))
5641 ev.store_hint = 0x01;
5642 else
5643 ev.store_hint = 0x00;
5644
5645 bacpy(&ev.rpa, &irk->rpa);
5646 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5647 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5648 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5649
5650 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5651 }
5652
/* Forward a newly received signature resolving key to userspace
 * together with a hint whether it should be stored persistently.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5682
5683 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5684 u8 data_len)
5685 {
5686 eir[eir_len++] = sizeof(type) + data_len;
5687 eir[eir_len++] = type;
5688 memcpy(&eir[eir_len], data, data_len);
5689 eir_len += data_len;
5690
5691 return eir_len;
5692 }
5693
/* Emit Device Connected, appending the remote name and class of
 * device as EIR fields when available.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Fixed event plus variable-length EIR data is assembled in a
	 * local buffer.  NOTE(review): assumes name_len plus the class
	 * field always fit in 512 bytes - callers appear bounded by the
	 * HCI name length, confirm.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is non-zero. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5720
5721 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5722 {
5723 struct mgmt_cp_disconnect *cp = cmd->param;
5724 struct sock **sk = data;
5725 struct mgmt_rp_disconnect rp;
5726
5727 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5728 rp.addr.type = cp->addr.type;
5729
5730 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5731 sizeof(rp));
5732
5733 *sk = cmd->sk;
5734 sock_hold(*sk);
5735
5736 mgmt_pending_remove(cmd);
5737 }
5738
5739 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5740 {
5741 struct hci_dev *hdev = data;
5742 struct mgmt_cp_unpair_device *cp = cmd->param;
5743 struct mgmt_rp_unpair_device rp;
5744
5745 memset(&rp, 0, sizeof(rp));
5746 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5747 rp.addr.type = cp->addr.type;
5748
5749 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5750
5751 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5752
5753 mgmt_pending_remove(cmd);
5754 }
5755
/* Handle a completed disconnection: possibly finish a pending power
 * off, answer pending Disconnect/Unpair commands and emit Device
 * Disconnected.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only announce connections that were reported to userspace. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp sets sk to the socket that requested the
	 * disconnect (with a reference held) so it can be skipped
	 * when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5797
/* A requested disconnect failed: complete pending Unpair commands and
 * the matching pending Disconnect command (address and type must
 * match).
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different device than the pending one. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5829
/* An outgoing connection attempt failed: possibly finish a pending
 * power off (same logic as mgmt_device_disconnected) and emit Connect
 * Failed.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5855
5856 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5857 {
5858 struct mgmt_ev_pin_code_request ev;
5859
5860 bacpy(&ev.addr.bdaddr, bdaddr);
5861 ev.addr.type = BDADDR_BREDR;
5862 ev.secure = secure;
5863
5864 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5865 }
5866
5867 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5868 u8 status)
5869 {
5870 struct pending_cmd *cmd;
5871 struct mgmt_rp_pin_code_reply rp;
5872
5873 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5874 if (!cmd)
5875 return;
5876
5877 bacpy(&rp.addr.bdaddr, bdaddr);
5878 rp.addr.type = BDADDR_BREDR;
5879
5880 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5881 mgmt_status(status), &rp, sizeof(rp));
5882
5883 mgmt_pending_remove(cmd);
5884 }
5885
5886 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5887 u8 status)
5888 {
5889 struct pending_cmd *cmd;
5890 struct mgmt_rp_pin_code_reply rp;
5891
5892 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5893 if (!cmd)
5894 return;
5895
5896 bacpy(&rp.addr.bdaddr, bdaddr);
5897 rp.addr.type = BDADDR_BREDR;
5898
5899 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5900 mgmt_status(status), &rp, sizeof(rp));
5901
5902 mgmt_pending_remove(cmd);
5903 }
5904
5905 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5906 u8 link_type, u8 addr_type, u32 value,
5907 u8 confirm_hint)
5908 {
5909 struct mgmt_ev_user_confirm_request ev;
5910
5911 BT_DBG("%s", hdev->name);
5912
5913 bacpy(&ev.addr.bdaddr, bdaddr);
5914 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5915 ev.confirm_hint = confirm_hint;
5916 ev.value = cpu_to_le32(value);
5917
5918 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5919 NULL);
5920 }
5921
5922 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5923 u8 link_type, u8 addr_type)
5924 {
5925 struct mgmt_ev_user_passkey_request ev;
5926
5927 BT_DBG("%s", hdev->name);
5928
5929 bacpy(&ev.addr.bdaddr, bdaddr);
5930 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5931
5932 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5933 NULL);
5934 }
5935
5936 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5937 u8 link_type, u8 addr_type, u8 status,
5938 u8 opcode)
5939 {
5940 struct pending_cmd *cmd;
5941 struct mgmt_rp_user_confirm_reply rp;
5942 int err;
5943
5944 cmd = mgmt_pending_find(opcode, hdev);
5945 if (!cmd)
5946 return -ENOENT;
5947
5948 bacpy(&rp.addr.bdaddr, bdaddr);
5949 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5950 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5951 &rp, sizeof(rp));
5952
5953 mgmt_pending_remove(cmd);
5954
5955 return err;
5956 }
5957
5958 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5959 u8 link_type, u8 addr_type, u8 status)
5960 {
5961 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5962 status, MGMT_OP_USER_CONFIRM_REPLY);
5963 }
5964
5965 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5966 u8 link_type, u8 addr_type, u8 status)
5967 {
5968 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5969 status,
5970 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5971 }
5972
5973 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5974 u8 link_type, u8 addr_type, u8 status)
5975 {
5976 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5977 status, MGMT_OP_USER_PASSKEY_REPLY);
5978 }
5979
5980 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5981 u8 link_type, u8 addr_type, u8 status)
5982 {
5983 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5984 status,
5985 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5986 }
5987
5988 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5989 u8 link_type, u8 addr_type, u32 passkey,
5990 u8 entered)
5991 {
5992 struct mgmt_ev_passkey_notify ev;
5993
5994 BT_DBG("%s", hdev->name);
5995
5996 bacpy(&ev.addr.bdaddr, bdaddr);
5997 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5998 ev.passkey = __cpu_to_le32(passkey);
5999 ev.entered = entered;
6000
6001 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6002 }
6003
6004 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6005 u8 addr_type, u8 status)
6006 {
6007 struct mgmt_ev_auth_failed ev;
6008
6009 bacpy(&ev.addr.bdaddr, bdaddr);
6010 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6011 ev.status = mgmt_status(status);
6012
6013 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6014 }
6015
/* Handle completion of an authentication-enable change: sync the
 * LINK_SECURITY mgmt flag with the controller's HCI_AUTH state and
 * answer all pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* On failure just fail every pending command */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into dev_flags; the atomic
	 * test_and_* ops make "changed" true only when the flag
	 * actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	/* settings_rsp presumably also records a socket in match.sk for
	 * the notification below — confirm against settings_rsp.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6044
6045 static void clear_eir(struct hci_request *req)
6046 {
6047 struct hci_dev *hdev = req->hdev;
6048 struct hci_cp_write_eir cp;
6049
6050 if (!lmp_ext_inq_capable(hdev))
6051 return;
6052
6053 memset(hdev->eir, 0, sizeof(hdev->eir));
6054
6055 memset(&cp, 0, sizeof(cp));
6056
6057 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6058 }
6059
/* Handle completion of an SSP enable/disable change: update the
 * SSP_ENABLED (and dependent HS_ENABLED) flags, answer pending Set SSP
 * commands, notify user space if the settings changed, and refresh or
 * clear the EIR accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back a previously-set flag; HS
		 * depends on SSP so it is cleared along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; "changed" must end up
		 * true if either flag flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, optionally enable debug keys and refresh the EIR;
	 * with SSP off, wipe the EIR.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6112
/* Handle completion of a Secure Connections enable/disable change:
 * update the SC_ENABLED/SC_ONLY flags, answer pending Set Secure
 * Connections commands, and notify user space on any settings change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back both SC flags; SC_ONLY is
		 * meaningless without SC_ENABLED.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6149
6150 static void sk_lookup(struct pending_cmd *cmd, void *data)
6151 {
6152 struct cmd_lookup *match = data;
6153
6154 if (match->sk == NULL) {
6155 match->sk = cmd->sk;
6156 sock_hold(match->sk);
6157 }
6158 }
6159
6160 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6161 u8 status)
6162 {
6163 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6164
6165 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6166 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6167 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6168
6169 if (!status)
6170 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6171 NULL);
6172
6173 if (match.sk)
6174 sock_put(match.sk);
6175 }
6176
/* Handle completion of a local name change and emit a Local Name
 * Changed event, except when the change stems from powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	/* The set-name command parameter layout is reused as the event
	 * payload here — presumably the structures match field for
	 * field; confirm against mgmt.h.
	 */
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No user-space request pending, so the controller is
		 * the source of truth: cache the new name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any) in the broadcast */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6203
/* Handle completion of a local OOB data read and answer the pending
 * Read Local OOB Data command. When Secure Connections is enabled and
 * 256-bit data is available, the extended (192+256 bit) response
 * layout is used; otherwise only the 192-bit hash/randomizer is sent.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* Both branches reply to the same opcode; only the
		 * response payload size differs.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6250
/* Emit a Device Found event for a discovery result. RPAs are resolved
 * to identity addresses via the IRK store, the class of device is
 * appended as an EIR field when not already present, and scan response
 * data is concatenated after the EIR data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* Event header plus variable-length EIR + scan response data */
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Only report while a discovery procedure is active */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If the address resolves to a known IRK, report the identity
	 * address instead of the (random) address seen on the air.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append class of device unless the EIR data already carries it */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6298
/* Report a resolved remote name as a Device Found event whose EIR data
 * contains a single Complete Name field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* Room for the event header plus one EIR field: 2 bytes of
	 * length/type header and up to HCI_MAX_NAME_LENGTH name bytes.
	 * NOTE(review): assumes callers pass name_len <=
	 * HCI_MAX_NAME_LENGTH — no bound check here; confirm at call
	 * sites.
	 */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
6321
6322 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6323 {
6324 struct mgmt_ev_discovering ev;
6325 struct pending_cmd *cmd;
6326
6327 BT_DBG("%s discovering %u", hdev->name, discovering);
6328
6329 if (discovering)
6330 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6331 else
6332 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6333
6334 if (cmd != NULL) {
6335 u8 type = hdev->discovery.type;
6336
6337 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6338 sizeof(type));
6339 mgmt_pending_remove(cmd);
6340 }
6341
6342 memset(&ev, 0, sizeof(ev));
6343 ev.type = hdev->discovery.type;
6344 ev.discovering = discovering;
6345
6346 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6347 }
6348
6349 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6350 {
6351 struct pending_cmd *cmd;
6352 struct mgmt_ev_device_blocked ev;
6353
6354 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
6355
6356 bacpy(&ev.addr.bdaddr, bdaddr);
6357 ev.addr.type = type;
6358
6359 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
6360 cmd ? cmd->sk : NULL);
6361 }
6362
6363 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
6364 {
6365 struct pending_cmd *cmd;
6366 struct mgmt_ev_device_unblocked ev;
6367
6368 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
6369
6370 bacpy(&ev.addr.bdaddr, bdaddr);
6371 ev.addr.type = type;
6372
6373 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
6374 cmd ? cmd->sk : NULL);
6375 }
6376
6377 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6378 {
6379 BT_DBG("%s status %u", hdev->name, status);
6380
6381 /* Clear the advertising mgmt setting if we failed to re-enable it */
6382 if (status) {
6383 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6384 new_settings(hdev, NULL);
6385 }
6386 }
6387
6388 void mgmt_reenable_advertising(struct hci_dev *hdev)
6389 {
6390 struct hci_request req;
6391
6392 if (hci_conn_num(hdev, LE_LINK) > 0)
6393 return;
6394
6395 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6396 return;
6397
6398 hci_req_init(&req, hdev);
6399 enable_advertising(&req);
6400
6401 /* If this fails we have no option but to let user space know
6402 * that we've disabled advertising.
6403 */
6404 if (hci_req_run(&req, adv_enable_complete) < 0) {
6405 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6406 new_settings(hdev, NULL);
6407 }
6408 }