]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Add timer to force power off
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Opcodes of all mgmt commands this interface implements; returned
 * verbatim (as little-endian u16 values) by MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Opcodes of all mgmt events this interface can emit; returned
 * (as little-endian u16 values) by MGMT_OP_READ_COMMANDS alongside
 * the command list above.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
112
/* Timeout for the service cache used by update_eir()/update_class() */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* From mgmt's point of view a controller is powered only when the HCI
 * transport is up AND it is not merely in the automatic power-on state
 * used while setting the controller up (HCI_AUTO_OFF still set).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* One mgmt command that has been accepted but not yet answered. */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being waited on */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'ed copy of the command parameters */
	struct sock *sk;	/* originating socket; a reference is held */
	void *user_data;	/* opaque per-command context */
};
126
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status byte.  HCI codes beyond the end of this table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
191
192 static u8 mgmt_status(u8 hci_status)
193 {
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
196
197 return MGMT_STATUS_FAILED;
198 }
199
/* Send a MGMT_EV_CMD_STATUS event for command @cmd with @status back
 * to the issuing socket @sk.  Returns 0 on success or a negative errno
 * (e.g. -ENOMEM) on failure.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	/* All wire fields are little-endian */
	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* sock_queue_rcv_skb() does not consume the skb on failure */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
229
/* Send a MGMT_EV_CMD_COMPLETE event for command @cmd back to the
 * issuing socket, optionally carrying @rp_len bytes of response
 * parameters from @rp.  Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	/* Response parameters are optional (rp may be NULL) */
	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* sock_queue_rcv_skb() does not consume the skb on failure */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
263
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 u16 data_len)
266 {
267 struct mgmt_rp_read_version rp;
268
269 BT_DBG("sock %p", sk);
270
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
273
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 sizeof(rp));
276 }
277
/* Handle MGMT_OP_READ_COMMANDS: reply with the lists of supported
 * command and event opcodes, each serialized as little-endian u16.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	/* Header plus one u16 per command and per event */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	/* Commands first, then events, in one contiguous opcode array */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
311
312 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
313 u16 data_len)
314 {
315 struct mgmt_rp_read_index_list *rp;
316 struct hci_dev *d;
317 size_t rp_len;
318 u16 count;
319 int err;
320
321 BT_DBG("sock %p", sk);
322
323 read_lock(&hci_dev_list_lock);
324
325 count = 0;
326 list_for_each_entry(d, &hci_dev_list, list) {
327 if (d->dev_type == HCI_BREDR)
328 count++;
329 }
330
331 rp_len = sizeof(*rp) + (2 * count);
332 rp = kmalloc(rp_len, GFP_ATOMIC);
333 if (!rp) {
334 read_unlock(&hci_dev_list_lock);
335 return -ENOMEM;
336 }
337
338 count = 0;
339 list_for_each_entry(d, &hci_dev_list, list) {
340 if (test_bit(HCI_SETUP, &d->dev_flags))
341 continue;
342
343 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
344 continue;
345
346 if (d->dev_type == HCI_BREDR) {
347 rp->index[count++] = cpu_to_le16(d->id);
348 BT_DBG("Added hci%u", d->id);
349 }
350 }
351
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
354
355 read_unlock(&hci_dev_list_lock);
356
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
358 rp_len);
359
360 kfree(rp);
361
362 return err;
363 }
364
365 static u32 get_supported_settings(struct hci_dev *hdev)
366 {
367 u32 settings = 0;
368
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
371 settings |= MGMT_SETTING_DEBUG_KEYS;
372
373 if (lmp_bredr_capable(hdev)) {
374 settings |= MGMT_SETTING_CONNECTABLE;
375 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
376 settings |= MGMT_SETTING_FAST_CONNECTABLE;
377 settings |= MGMT_SETTING_DISCOVERABLE;
378 settings |= MGMT_SETTING_BREDR;
379 settings |= MGMT_SETTING_LINK_SECURITY;
380
381 if (lmp_ssp_capable(hdev)) {
382 settings |= MGMT_SETTING_SSP;
383 settings |= MGMT_SETTING_HS;
384 }
385
386 if (lmp_sc_capable(hdev) ||
387 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
388 settings |= MGMT_SETTING_SECURE_CONN;
389 }
390
391 if (lmp_le_capable(hdev)) {
392 settings |= MGMT_SETTING_LE;
393 settings |= MGMT_SETTING_ADVERTISING;
394 settings |= MGMT_SETTING_PRIVACY;
395 }
396
397 return settings;
398 }
399
400 static u32 get_current_settings(struct hci_dev *hdev)
401 {
402 u32 settings = 0;
403
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
406
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
409
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
412
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
415
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
418
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
421
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
424
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
427
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
430
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
433
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
436
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
439
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
442
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
445
446 return settings;
447 }
448
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service class
 * UUIDs to @data, writing at most @len bytes.  Returns a pointer just
 * past the bytes written.  If not every UUID fits, the field type is
 * downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit form lives in bytes 12-13 of the stored UUID */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip values below the service class range */
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is carried in the Device ID field instead */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only so far */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		/* Little-endian on the wire */
		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
492
/* Append an EIR field listing the registered 32-bit service class
 * UUIDs to @data, writing at most @len bytes.  Returns a pointer just
 * past the bytes written; downgrades EIR_UUID32_ALL to
 * EIR_UUID32_SOME when not all UUIDs fit.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only so far */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit form lives in bytes 12-15 of the stored UUID */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
525
/* Append an EIR field listing the registered 128-bit service class
 * UUIDs to @data, writing at most @len bytes.  Returns a pointer just
 * past the bytes written; downgrades EIR_UUID128_ALL to
 * EIR_UUID128_SOME when not all UUIDs fit.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only so far */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
558
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
560 {
561 struct pending_cmd *cmd;
562
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
565 return cmd;
566 }
567
568 return NULL;
569 }
570
571 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
572 {
573 u8 ad_len = 0;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577 if (name_len > 0) {
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
579
580 if (name_len > max_len) {
581 name_len = max_len;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 ptr[0] = name_len + 1;
587
588 memcpy(ptr + 2, hdev->dev_name, name_len);
589
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
592 }
593
594 return ad_len;
595 }
596
/* Queue an HCI command updating the LE scan response data, but only
 * when the newly built data differs from what was last programmed.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the data is unchanged */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
621
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
623 {
624 struct pending_cmd *cmd;
625
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
628 */
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
630 if (cmd) {
631 struct mgmt_mode *cp = cmd->param;
632 if (cp->val == 0x01)
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
636 } else {
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
641 }
642
643 return 0;
644 }
645
/* Build the LE advertising data (flags field plus TX power, when
 * known) into @ptr and return the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Signal that BR/EDR is not available when it is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
677
/* Queue an HCI command updating the LE advertising data, but only
 * when the newly built data differs from what was last programmed.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do if the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
702
/* Build the BR/EDR Extended Inquiry Response payload into @data:
 * local name, inquiry TX power, Device ID, and the 16/32/128-bit
 * service class UUID lists (in that order).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* devid_source > 0 indicates a Device ID has been configured */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
750
/* Queue a Write Extended Inquiry Response command when the EIR data
 * would change.  Skipped while powered off, when the controller lacks
 * extended inquiry support, when SSP is disabled, or while the service
 * cache is active (the cache timeout will trigger the update later).
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the data is unchanged */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
779
780 static u8 get_service_classes(struct hci_dev *hdev)
781 {
782 struct bt_uuid *uuid;
783 u8 val = 0;
784
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
787
788 return val;
789 }
790
/* Queue a Write Class of Device command when the computed class
 * differs from the current one.  Skipped while powered off, when
 * BR/EDR is disabled, or while the service cache is active.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Reflect limited discoverable mode in the class bits */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Nothing to do if the class is unchanged */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
819
820 static bool get_connectable(struct hci_dev *hdev)
821 {
822 struct pending_cmd *cmd;
823
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
826 */
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
828 if (cmd) {
829 struct mgmt_mode *cp = cmd->param;
830 return cp->val;
831 }
832
833 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
834 }
835
/* Queue the HCI commands that configure and enable LE advertising,
 * choosing the advertising type and own-address type based on the
 * (possibly pending) connectable state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* Advertising interval: 0x0800 units for both min and max */
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
863
/* Queue an HCI command turning LE advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
870
/* Delayed-work callback run when the service cache expires: pushes the
 * EIR data and device class updates that were deferred while the cache
 * was active.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was actually set (clears it too) */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
891
/* Delayed-work callback run when the resolvable private address
 * expires: marks it expired and, if advertising, restarts advertising
 * so a new RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to refresh unless we are advertising and idle on LE */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
917
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt command addresses the device (HCI_MGMT flag guards
 * against repeat runs).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
933
/* Handle MGMT_OP_READ_INFO: reply with the controller's address,
 * version, manufacturer, supported/current settings, class and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
963
/* Release a pending command: drops the socket reference taken in
 * mgmt_pending_add() and frees the parameter copy and the entry.
 * The caller must have unlinked it from the pending list already.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
970
/* Create a pending command entry for @opcode on @hdev, copying @len
 * bytes of @data as its parameters, and link it into
 * hdev->mgmt_pending.  Takes a reference on @sk for the lifetime of
 * the entry.  Returns the new entry or NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	/* Parameters are optional; the buffer is allocated regardless */
	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1000
1001 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1002 void (*cb)(struct pending_cmd *cmd,
1003 void *data),
1004 void *data)
1005 {
1006 struct pending_cmd *cmd, *tmp;
1007
1008 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1009 if (opcode > 0 && cmd->opcode != opcode)
1010 continue;
1011
1012 cb(cmd, data);
1013 }
1014 }
1015
/* Unlink a pending command from its list and release it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1021
1022 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1023 {
1024 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1025
1026 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1027 sizeof(settings));
1028 }
1029
/* Completion callback for the power-off cleanup request: once no
 * connections remain, schedule the actual power off immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1039
/* Build and run an HCI request that prepares the controller for power
 * off: disable page/inquiry scan, stop LE advertising and scanning,
 * and disconnect/cancel/reject every connection depending on its
 * state.  Returns the hci_req_run() result (-ENODATA when nothing
 * needed to be done).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get a clean disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1094
/* Handle MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * Special cases: if the device is in the automatic power-on state
 * (HCI_AUTO_OFF) a power-up just makes that state permanent, and a
 * request that matches the current state is answered immediately.
 * Power-down first runs the cleanup request built by
 * clean_up_hci_state() and only then schedules the actual power off.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* Already up; just confirm the powered state */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already active: answer without doing work */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1160
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * A NULL @hdev addresses the event to MGMT_INDEX_NONE.  Returns 0 on
 * success or -ENOMEM on allocation failure.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	/* Event payload is optional */
	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_control() clones per receiver; free our copy */
	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1190
1191 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1192 {
1193 __le32 ev;
1194
1195 ev = cpu_to_le32(get_current_settings(hdev));
1196
1197 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1198 }
1199
/* Context passed through mgmt_pending_foreach() callbacks. sk records
 * the first originating socket seen (a reference is taken by the
 * callback) so it can later be skipped when broadcasting events.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1205
1206 static void settings_rsp(struct pending_cmd *cmd, void *data)
1207 {
1208 struct cmd_lookup *match = data;
1209
1210 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1211
1212 list_del(&cmd->list);
1213
1214 if (match->sk == NULL) {
1215 match->sk = cmd->sk;
1216 sock_hold(match->sk);
1217 }
1218
1219 mgmt_pending_free(cmd);
1220 }
1221
1222 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1223 {
1224 u8 *status = data;
1225
1226 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1227 mgmt_pending_remove(cmd);
1228 }
1229
1230 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1231 {
1232 if (!lmp_bredr_capable(hdev))
1233 return MGMT_STATUS_NOT_SUPPORTED;
1234 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1235 return MGMT_STATUS_REJECTED;
1236 else
1237 return MGMT_STATUS_SUCCESS;
1238 }
1239
1240 static u8 mgmt_le_support(struct hci_dev *hdev)
1241 {
1242 if (!lmp_le_capable(hdev))
1243 return MGMT_STATUS_NOT_SUPPORTED;
1244 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1245 return MGMT_STATUS_REJECTED;
1246 else
1247 return MGMT_STATUS_SUCCESS;
1248 }
1249
/* Request completion handler for MGMT_OP_SET_DISCOVERABLE. Syncs the
 * HCI_DISCOVERABLE flag with the requested mode, arms the
 * discoverable timeout if one was stored, responds to the pending
 * command and refreshes the class of device so that the limited
 * discoverable bit stays correct.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited discoverable bit that
		 * set_discoverable() set before issuing the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout that set_discoverable() stored */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1306
/* Handler for MGMT_OP_SET_DISCOVERABLE. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). Depending on current state this updates flags
 * directly, merely refreshes the timeout, or issues an HCI request
 * completed by set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one discoverable/connectable change at a time */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1471
1472 static void write_fast_connectable(struct hci_request *req, bool enable)
1473 {
1474 struct hci_dev *hdev = req->hdev;
1475 struct hci_cp_write_page_scan_activity acp;
1476 u8 type;
1477
1478 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1479 return;
1480
1481 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1482 return;
1483
1484 if (enable) {
1485 type = PAGE_SCAN_TYPE_INTERLACED;
1486
1487 /* 160 msec page scan interval */
1488 acp.interval = __constant_cpu_to_le16(0x0100);
1489 } else {
1490 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1491
1492 /* default 1.28 sec page scan */
1493 acp.interval = __constant_cpu_to_le16(0x0800);
1494 }
1495
1496 acp.window = __constant_cpu_to_le16(0x0012);
1497
1498 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1499 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1500 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1501 sizeof(acp), &acp);
1502
1503 if (hdev->page_scan_type != type)
1504 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1505 }
1506
/* Request completion handler for MGMT_OP_SET_CONNECTABLE. Syncs the
 * HCI_CONNECTABLE flag with the requested mode and responds to the
 * pending command.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1544
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1547 {
1548 bool changed = false;
1549 int err;
1550
1551 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1552 changed = true;
1553
1554 if (val) {
1555 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1556 } else {
1557 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1558 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1559 }
1560
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1562 if (err < 0)
1563 return err;
1564
1565 if (changed)
1566 return new_settings(hdev, sk);
1567
1568 return 0;
1569 }
1570
/* Handler for MGMT_OP_SET_CONNECTABLE. Either updates the stored
 * settings directly (powered off) or builds an HCI request adjusting
 * page scan and/or advertising; completion is handled by
 * set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: no HCI traffic needed, just update the flags */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one discoverable/connectable change at a time */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* A running discoverable timeout becomes
			 * meaningless once scanning is turned off.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising (disable then enable) while no LE
	 * connection is up.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA means no HCI commands were queued; fall
		 * back to a plain settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1665
1666 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1667 u16 len)
1668 {
1669 struct mgmt_mode *cp = data;
1670 bool changed;
1671 int err;
1672
1673 BT_DBG("request for %s", hdev->name);
1674
1675 if (cp->val != 0x00 && cp->val != 0x01)
1676 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1677 MGMT_STATUS_INVALID_PARAMS);
1678
1679 hci_dev_lock(hdev);
1680
1681 if (cp->val)
1682 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1683 else
1684 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1685
1686 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1687 if (err < 0)
1688 goto unlock;
1689
1690 if (changed)
1691 err = new_settings(hdev, sk);
1692
1693 unlock:
1694 hci_dev_unlock(hdev);
1695 return err;
1696 }
1697
/* Handler for MGMT_OP_SET_LINK_SECURITY. Toggles BR/EDR link level
 * security via HCI_OP_WRITE_AUTH_ENABLE, or just the stored flag when
 * the controller is powered off.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the dev_flags bit needs updating */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: nothing to send */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1767
/* Handler for MGMT_OP_SET_SSP. Enables or disables Secure Simple
 * Pairing via HCI_OP_WRITE_SSP_MODE. Disabling SSP while powered off
 * also clears High Speed, which depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: update the flags only */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS depends on SSP; clear it too, counting a
			 * change if either flag actually flipped.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: nothing to send */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1845
/* Handler for MGMT_OP_SET_HS. High Speed is a pure host-side flag (no
 * HCI command is involved). It requires SSP, and disabling it while
 * powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1896
/* Request completion handler for MGMT_OP_SET_LE. Responds to all
 * pending SET_LE commands and, when LE ended up enabled, refreshes
 * the advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* settings_rsp stores the first socket in match.sk (with a
	 * reference) so it can be skipped in the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1934
/* Handler for MGMT_OP_SET_LE. Toggles LE host support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED, or updates flags only when powered
 * off or when the host LE state already matches. Disabling LE also
 * turns advertising off.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI command needed when powered off or when the host
	 * already reports the requested LE support.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay enabled without LE */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2023
2024 /* This is a helper function to test for pending mgmt commands that can
2025 * cause CoD or EIR HCI commands. We can only allow one such pending
2026 * mgmt command at a time since otherwise we cannot easily track what
2027 * the current values are, will be, and based on that calculate if a new
2028 * HCI command needs to be sent and if yes with what value.
2029 */
2030 static bool pending_eir_or_class(struct hci_dev *hdev)
2031 {
2032 struct pending_cmd *cmd;
2033
2034 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2035 switch (cmd->opcode) {
2036 case MGMT_OP_ADD_UUID:
2037 case MGMT_OP_REMOVE_UUID:
2038 case MGMT_OP_SET_DEV_CLASS:
2039 case MGMT_OP_SET_POWERED:
2040 return true;
2041 }
2042 }
2043
2044 return false;
2045 }
2046
/* Bluetooth Base UUID in little-endian byte order; UUIDs that can be
 * shortened to 16 or 32 bits differ from it only in the final four
 * bytes (compared against the first 12 in get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2051
2052 static u8 get_uuid_size(const u8 *uuid)
2053 {
2054 u32 val;
2055
2056 if (memcmp(uuid, bluetooth_base_uuid, 12))
2057 return 128;
2058
2059 val = get_unaligned_le32(&uuid[12]);
2060 if (val > 0xffff)
2061 return 32;
2062
2063 return 16;
2064 }
2065
/* Common completion helper for the class/EIR affecting commands
 * (Add/Remove UUID, Set Device Class): responds to the pending
 * command of the given opcode with the current class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2084
/* HCI request callback for add_uuid(): complete the pending
 * MGMT_OP_ADD_UUID command.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2091
/* Handler for MGMT_OP_ADD_UUID. Stores the UUID in hdev->uuids and
 * issues HCI commands to refresh the class of device and EIR data.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* ENODATA: no HCI traffic was needed, reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2149
2150 static bool enable_service_cache(struct hci_dev *hdev)
2151 {
2152 if (!hdev_is_powered(hdev))
2153 return false;
2154
2155 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2156 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2157 CACHE_TIMEOUT);
2158 return true;
2159 }
2160
2161 return false;
2162 }
2163
/* HCI request callback for remove_uuid(): complete the pending
 * MGMT_OP_REMOVE_UUID command.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2170
/* Handler for MGMT_OP_REMOVE_UUID. Removes matching UUIDs (or all of
 * them when the all-zero wildcard UUID is given) and refreshes class
 * of device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID clears the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no HCI traffic was needed, reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2248
/* HCI request callback for set_dev_class(): complete the pending
 * MGMT_OP_SET_DEV_CLASS command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2255
/* Handler for MGMT_OP_SET_DEV_CLASS. Stores the major/minor device
 * class and, when powered, issues HCI commands to update class of
 * device (plus EIR data if the service cache was active).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major must be
	 * zero — presumably the CoD format-type/reserved bits; verify
	 * against the Bluetooth Assigned Numbers.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the sync
		 * cancel — presumably because the work item itself
		 * takes hci_dev_lock; confirm before relying on it.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* ENODATA: no HCI traffic was needed, reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2326
/* Handler for MGMT_OP_LOAD_LINK_KEYS. Replaces the complete set of
 * stored BR/EDR link keys with the provided list and updates the
 * debug keys setting.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	/* NOTE(review): key_count * sizeof(...) is truncated into a
	 * u16 here; a huge key_count could wrap expected_len — confirm
	 * the mgmt transport bounds len before this handler runs.
	 */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2392
2393 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394 u8 addr_type, struct sock *skip_sk)
2395 {
2396 struct mgmt_ev_device_unpaired ev;
2397
2398 bacpy(&ev.addr.bdaddr, bdaddr);
2399 ev.addr.type = addr_type;
2400
2401 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2402 skip_sk);
2403 }
2404
/* Remove all stored pairing data (link key, or IRK/LTK/conn params for LE)
 * for the device given in the request and, when cp->disconnect is set,
 * also tear down any active connection.  The reply echoes the address.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                         u16 len)
{
        struct mgmt_cp_unpair_device *cp = data;
        struct mgmt_rp_unpair_device rp;
        struct hci_cp_disconnect dc;
        struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));

        /* disconnect is a boolean flag; any other value is malformed */
        if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
                return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto unlock;
        }

        if (cp->addr.type == BDADDR_BREDR) {
                err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
        } else {
                u8 addr_type;

                /* Map mgmt address type to HCI LE address type */
                if (cp->addr.type == BDADDR_LE_PUBLIC)
                        addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        addr_type = ADDR_LE_DEV_RANDOM;

                hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

                hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

                /* Only the LTK removal result decides whether the device
                 * counted as paired; IRK/params removal is best effort.
                 */
                err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
        }

        if (err < 0) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
                goto unlock;
        }

        if (cp->disconnect) {
                if (cp->addr.type == BDADDR_BREDR)
                        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                                       &cp->addr.bdaddr);
                else
                        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
                                                       &cp->addr.bdaddr);
        } else {
                conn = NULL;
        }

        /* Nothing to disconnect: reply immediately and notify the other
         * mgmt sockets (skipping the requester) that the device is gone.
         */
        if (!conn) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
                                   &rp, sizeof(rp));
                device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
                goto unlock;
        }

        /* Defer the reply until the disconnect completes */
        cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
                               sizeof(*cp));
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        dc.handle = cpu_to_le16(conn->handle);
        dc.reason = 0x13; /* Remote User Terminated Connection */
        err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
        if (err < 0)
                mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
2495
/* Handle MGMT_OP_DISCONNECT: request an HCI disconnect of the given
 * BR/EDR or LE connection.  The command reply is deferred via a pending
 * command and sent when the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
                      u16 len)
{
        struct mgmt_cp_disconnect *cp = data;
        struct mgmt_rp_disconnect rp;
        struct hci_cp_disconnect dc;
        struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto failed;
        }

        /* Only one disconnect may be in flight per controller */
        if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                   MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto failed;
        }

        if (cp->addr.type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                                               &cp->addr.bdaddr);
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

        /* BT_OPEN/BT_CLOSED connections have no link to disconnect */
        if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
                                   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
                goto failed;
        }

        /* Defer the reply until the HCI disconnect event arrives */
        cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        dc.handle = cpu_to_le16(conn->handle);
        dc.reason = HCI_ERROR_REMOTE_USER_TERM;

        err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
2560
2561 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2562 {
2563 switch (link_type) {
2564 case LE_LINK:
2565 switch (addr_type) {
2566 case ADDR_LE_DEV_PUBLIC:
2567 return BDADDR_LE_PUBLIC;
2568
2569 default:
2570 /* Fallback to LE Random address type */
2571 return BDADDR_LE_RANDOM;
2572 }
2573
2574 default:
2575 /* Fallback to BR/EDR type */
2576 return BDADDR_BREDR;
2577 }
2578 }
2579
2580 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2581 u16 data_len)
2582 {
2583 struct mgmt_rp_get_connections *rp;
2584 struct hci_conn *c;
2585 size_t rp_len;
2586 int err;
2587 u16 i;
2588
2589 BT_DBG("");
2590
2591 hci_dev_lock(hdev);
2592
2593 if (!hdev_is_powered(hdev)) {
2594 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2595 MGMT_STATUS_NOT_POWERED);
2596 goto unlock;
2597 }
2598
2599 i = 0;
2600 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2601 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2602 i++;
2603 }
2604
2605 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2606 rp = kmalloc(rp_len, GFP_KERNEL);
2607 if (!rp) {
2608 err = -ENOMEM;
2609 goto unlock;
2610 }
2611
2612 i = 0;
2613 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2614 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2615 continue;
2616 bacpy(&rp->addr[i].bdaddr, &c->dst);
2617 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2618 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2619 continue;
2620 i++;
2621 }
2622
2623 rp->conn_count = cpu_to_le16(i);
2624
2625 /* Recalculate length in case of filtered SCO connections, etc */
2626 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2627
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2629 rp_len);
2630
2631 kfree(rp);
2632
2633 unlock:
2634 hci_dev_unlock(hdev);
2635 return err;
2636 }
2637
2638 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2639 struct mgmt_cp_pin_code_neg_reply *cp)
2640 {
2641 struct pending_cmd *cmd;
2642 int err;
2643
2644 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2645 sizeof(*cp));
2646 if (!cmd)
2647 return -ENOMEM;
2648
2649 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2650 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2651 if (err < 0)
2652 mgmt_pending_remove(cmd);
2653
2654 return err;
2655 }
2656
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller for an ongoing BR/EDR pairing.  A PIN shorter than 16 bytes
 * is rejected with a negative reply when the pending security level
 * requires a 16-digit PIN.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct hci_conn *conn;
        struct mgmt_cp_pin_code_reply *cp = data;
        struct hci_cp_pin_code_reply reply;
        struct pending_cmd *cmd;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                 MGMT_STATUS_NOT_POWERED);
                goto failed;
        }

        /* PIN codes only apply to BR/EDR (ACL) connections */
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
        if (!conn) {
                err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                 MGMT_STATUS_NOT_CONNECTED);
                goto failed;
        }

        /* High security requires a full 16-byte PIN; anything shorter is
         * answered with a negative reply towards the controller and an
         * INVALID_PARAMS status towards userspace.
         */
        if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
                struct mgmt_cp_pin_code_neg_reply ncp;

                memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

                BT_ERR("PIN code is not 16 bytes long");

                err = send_pin_code_neg_reply(sk, hdev, &ncp);
                if (err >= 0)
                        err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
                                         MGMT_STATUS_INVALID_PARAMS);

                goto failed;
        }

        /* Defer the reply until the controller confirms the PIN reply */
        cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        bacpy(&reply.bdaddr, &cp->addr.bdaddr);
        reply.pin_len = cp->pin_len;
        memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

        err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
2716
2717 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2718 u16 len)
2719 {
2720 struct mgmt_cp_set_io_capability *cp = data;
2721
2722 BT_DBG("");
2723
2724 hci_dev_lock(hdev);
2725
2726 hdev->io_capability = cp->io_capability;
2727
2728 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2729 hdev->io_capability);
2730
2731 hci_dev_unlock(hdev);
2732
2733 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2734 0);
2735 }
2736
2737 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2738 {
2739 struct hci_dev *hdev = conn->hdev;
2740 struct pending_cmd *cmd;
2741
2742 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2743 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2744 continue;
2745
2746 if (cmd->user_data != conn)
2747 continue;
2748
2749 return cmd;
2750 }
2751
2752 return NULL;
2753 }
2754
/* Finish a pending Pair Device command: send the reply with the given
 * mgmt status, detach the pairing callbacks from the connection, drop
 * the connection reference taken at pairing start and free the pending
 * command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
        struct mgmt_rp_pair_device rp;
        struct hci_conn *conn = cmd->user_data;

        bacpy(&rp.addr.bdaddr, &conn->dst);
        rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

        cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
                     &rp, sizeof(rp));

        /* So we don't get further callbacks for this connection */
        conn->connect_cfm_cb = NULL;
        conn->security_cfm_cb = NULL;
        conn->disconn_cfm_cb = NULL;

        /* Release the reference held on behalf of the pairing */
        hci_conn_drop(conn);

        mgmt_pending_remove(cmd);
}
2775
2776 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2777 {
2778 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2779 struct pending_cmd *cmd;
2780
2781 cmd = find_pairing(conn);
2782 if (cmd)
2783 pairing_complete(cmd, status);
2784 }
2785
2786 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2787 {
2788 struct pending_cmd *cmd;
2789
2790 BT_DBG("status %u", status);
2791
2792 cmd = find_pairing(conn);
2793 if (!cmd)
2794 BT_DBG("Unable to find a pending command");
2795 else
2796 pairing_complete(cmd, mgmt_status(status));
2797 }
2798
2799 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2800 {
2801 struct pending_cmd *cmd;
2802
2803 BT_DBG("status %u", status);
2804
2805 if (!status)
2806 return;
2807
2808 cmd = find_pairing(conn);
2809 if (!cmd)
2810 BT_DBG("Unable to find a pending command");
2811 else
2812 pairing_complete(cmd, mgmt_status(status));
2813 }
2814
/* Handle MGMT_OP_PAIR_DEVICE: initiate a connection (BR/EDR ACL or LE)
 * and dedicated bonding with the given remote device.  The command reply
 * is deferred through a pending command that the pairing callbacks
 * complete later.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
{
        struct mgmt_cp_pair_device *cp = data;
        struct mgmt_rp_pair_device rp;
        struct pending_cmd *cmd;
        u8 sec_level, auth_type;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        memset(&rp, 0, sizeof(rp));
        bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
        rp.addr.type = cp->addr.type;

        if (!bdaddr_type_is_valid(cp->addr.type))
                return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                    MGMT_STATUS_INVALID_PARAMS,
                                    &rp, sizeof(rp));

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
                goto unlock;
        }

        /* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection */
        sec_level = BT_SECURITY_MEDIUM;
        if (cp->io_cap == 0x03)
                auth_type = HCI_AT_DEDICATED_BONDING;
        else
                auth_type = HCI_AT_DEDICATED_BONDING_MITM;

        if (cp->addr.type == BDADDR_BREDR) {
                conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
                                       auth_type);
        } else {
                u8 addr_type;

                /* Convert from L2CAP channel address type to HCI address type
                 */
                if (cp->addr.type == BDADDR_LE_PUBLIC)
                        addr_type = ADDR_LE_DEV_PUBLIC;
                else
                        addr_type = ADDR_LE_DEV_RANDOM;

                conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
                                      sec_level, auth_type);
        }

        if (IS_ERR(conn)) {
                int status;

                if (PTR_ERR(conn) == -EBUSY)
                        status = MGMT_STATUS_BUSY;
                else
                        status = MGMT_STATUS_CONNECT_FAILED;

                err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                   status, &rp,
                                   sizeof(rp));
                goto unlock;
        }

        /* A non-NULL connect callback means another pairing owns this
         * connection already; drop the extra reference and report busy.
         */
        if (conn->connect_cfm_cb) {
                hci_conn_drop(conn);
                err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
                                   MGMT_STATUS_BUSY, &rp, sizeof(rp));
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                hci_conn_drop(conn);
                goto unlock;
        }

        /* For LE, just connecting isn't a proof that the pairing finished */
        if (cp->addr.type == BDADDR_BREDR) {
                conn->connect_cfm_cb = pairing_complete_cb;
                conn->security_cfm_cb = pairing_complete_cb;
                conn->disconn_cfm_cb = pairing_complete_cb;
        } else {
                conn->connect_cfm_cb = le_pairing_complete_cb;
                conn->security_cfm_cb = le_pairing_complete_cb;
                conn->disconn_cfm_cb = le_pairing_complete_cb;
        }

        conn->io_capability = cp->io_cap;
        cmd->user_data = conn;

        /* Already connected and secure enough: finish immediately */
        if (conn->state == BT_CONNECTED &&
            hci_conn_security(conn, sec_level, auth_type))
                pairing_complete(cmd, 0);

        err = 0;

unlock:
        hci_dev_unlock(hdev);
        return err;
}
2919
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending Pair Device
 * command for the given address, completing it with CANCELLED status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                              u16 len)
{
        struct mgmt_addr_info *addr = data;
        struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                 MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }

        /* There must be an in-flight Pair Device to cancel */
        cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
        if (!cmd) {
                err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                 MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

        conn = cmd->user_data;

        /* The supplied address must match the ongoing pairing */
        if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
                err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
                                 MGMT_STATUS_INVALID_PARAMS);
                goto unlock;
        }

        pairing_complete(cmd, MGMT_STATUS_CANCELLED);

        err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
                           addr, sizeof(*addr));
unlock:
        hci_dev_unlock(hdev);
        return err;
}
2961
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos and neg replies).  LE addresses are answered via
 * SMP directly; BR/EDR responses are forwarded as the given HCI command
 * and the mgmt reply is deferred until the matching HCI event.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
                             struct mgmt_addr_info *addr, u16 mgmt_op,
                             u16 hci_op, __le32 passkey)
{
        struct pending_cmd *cmd;
        struct hci_conn *conn;
        int err;

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_complete(sk, hdev->id, mgmt_op,
                                   MGMT_STATUS_NOT_POWERED, addr,
                                   sizeof(*addr));
                goto done;
        }

        if (addr->type == BDADDR_BREDR)
                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
        else
                conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

        if (!conn) {
                err = cmd_complete(sk, hdev->id, mgmt_op,
                                   MGMT_STATUS_NOT_CONNECTED, addr,
                                   sizeof(*addr));
                goto done;
        }

        if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
                /* Continue with pairing via SMP */
                err = smp_user_confirm_reply(conn, mgmt_op, passkey);

                /* SMP handles the reply synchronously, so answer now */
                if (!err)
                        err = cmd_complete(sk, hdev->id, mgmt_op,
                                           MGMT_STATUS_SUCCESS, addr,
                                           sizeof(*addr));
                else
                        err = cmd_complete(sk, hdev->id, mgmt_op,
                                           MGMT_STATUS_FAILED, addr,
                                           sizeof(*addr));

                goto done;
        }

        cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
        if (!cmd) {
                err = -ENOMEM;
                goto done;
        }

        /* Continue with pairing via HCI */
        if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
                struct hci_cp_user_passkey_reply cp;

                bacpy(&cp.bdaddr, &addr->bdaddr);
                cp.passkey = passkey;
                err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
        } else
                err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
                                   &addr->bdaddr);

        if (err < 0)
                mgmt_pending_remove(cmd);

done:
        hci_dev_unlock(hdev);
        return err;
}
3031
3032 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3033 void *data, u16 len)
3034 {
3035 struct mgmt_cp_pin_code_neg_reply *cp = data;
3036
3037 BT_DBG("");
3038
3039 return user_pairing_resp(sk, hdev, &cp->addr,
3040 MGMT_OP_PIN_CODE_NEG_REPLY,
3041 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3042 }
3043
3044 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3045 u16 len)
3046 {
3047 struct mgmt_cp_user_confirm_reply *cp = data;
3048
3049 BT_DBG("");
3050
3051 if (len != sizeof(*cp))
3052 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3053 MGMT_STATUS_INVALID_PARAMS);
3054
3055 return user_pairing_resp(sk, hdev, &cp->addr,
3056 MGMT_OP_USER_CONFIRM_REPLY,
3057 HCI_OP_USER_CONFIRM_REPLY, 0);
3058 }
3059
3060 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3061 void *data, u16 len)
3062 {
3063 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3064
3065 BT_DBG("");
3066
3067 return user_pairing_resp(sk, hdev, &cp->addr,
3068 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3069 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3070 }
3071
3072 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3073 u16 len)
3074 {
3075 struct mgmt_cp_user_passkey_reply *cp = data;
3076
3077 BT_DBG("");
3078
3079 return user_pairing_resp(sk, hdev, &cp->addr,
3080 MGMT_OP_USER_PASSKEY_REPLY,
3081 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3082 }
3083
3084 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3085 void *data, u16 len)
3086 {
3087 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3088
3089 BT_DBG("");
3090
3091 return user_pairing_resp(sk, hdev, &cp->addr,
3092 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3093 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3094 }
3095
3096 static void update_name(struct hci_request *req)
3097 {
3098 struct hci_dev *hdev = req->hdev;
3099 struct hci_cp_write_local_name cp;
3100
3101 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3102
3103 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3104 }
3105
3106 static void set_name_complete(struct hci_dev *hdev, u8 status)
3107 {
3108 struct mgmt_cp_set_local_name *cp;
3109 struct pending_cmd *cmd;
3110
3111 BT_DBG("status 0x%02x", status);
3112
3113 hci_dev_lock(hdev);
3114
3115 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3116 if (!cmd)
3117 goto unlock;
3118
3119 cp = cmd->param;
3120
3121 if (status)
3122 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3123 mgmt_status(status));
3124 else
3125 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3126 cp, sizeof(*cp));
3127
3128 mgmt_pending_remove(cmd);
3129
3130 unlock:
3131 hci_dev_unlock(hdev);
3132 }
3133
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short name.
 * When powered, the new name is pushed to the controller (EIR and/or scan
 * response data) and the reply is deferred via set_name_complete();
 * otherwise only the stored values change and the reply is immediate.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct mgmt_cp_set_local_name *cp = data;
        struct pending_cmd *cmd;
        struct hci_request req;
        int err;

        BT_DBG("");

        hci_dev_lock(hdev);

        /* If the old values are the same as the new ones just return a
         * direct command complete event.
         */
        if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
            !memcmp(hdev->short_name, cp->short_name,
                    sizeof(hdev->short_name))) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
                                   data, len);
                goto failed;
        }

        /* The short name never goes to the controller, only into EIR/scan
         * response data built by the kernel, so store it right away.
         */
        memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

        if (!hdev_is_powered(hdev)) {
                memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

                err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
                                   data, len);
                if (err < 0)
                        goto failed;

                /* Tell the other mgmt sockets about the change */
                err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
                                 sk);

                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

        hci_req_init(&req, hdev);

        if (lmp_bredr_capable(hdev)) {
                update_name(&req);
                update_eir(&req);
        }

        /* The name is stored in the scan response data and so
         * no need to udpate the advertising data here.
         */
        if (lmp_le_capable(hdev))
                update_scan_rsp_data(&req);

        err = hci_req_run(&req, set_name_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);

failed:
        hci_dev_unlock(hdev);
        return err;
}
3202
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local
 * out-of-band pairing data.  Uses the extended variant when Secure
 * Connections is enabled.  The reply is deferred via a pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 data_len)
{
        struct pending_cmd *cmd;
        int err;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_NOT_POWERED);
                goto unlock;
        }

        /* OOB data requires Secure Simple Pairing support */
        if (!lmp_ssp_capable(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_NOT_SUPPORTED);
                goto unlock;
        }

        /* Only one OOB read may be in flight at a time */
        if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                                 MGMT_STATUS_BUSY);
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        /* Secure Connections needs both P-192 and P-256 values, hence the
         * extended command variant.
         */
        if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
                err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
                                   0, NULL);
        else
                err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

        if (err < 0)
                mgmt_pending_remove(cmd);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
3250
3251 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3252 void *data, u16 len)
3253 {
3254 int err;
3255
3256 BT_DBG("%s ", hdev->name);
3257
3258 hci_dev_lock(hdev);
3259
3260 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3261 struct mgmt_cp_add_remote_oob_data *cp = data;
3262 u8 status;
3263
3264 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3265 cp->hash, cp->randomizer);
3266 if (err < 0)
3267 status = MGMT_STATUS_FAILED;
3268 else
3269 status = MGMT_STATUS_SUCCESS;
3270
3271 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3272 status, &cp->addr, sizeof(cp->addr));
3273 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3274 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3275 u8 status;
3276
3277 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3278 cp->hash192,
3279 cp->randomizer192,
3280 cp->hash256,
3281 cp->randomizer256);
3282 if (err < 0)
3283 status = MGMT_STATUS_FAILED;
3284 else
3285 status = MGMT_STATUS_SUCCESS;
3286
3287 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3288 status, &cp->addr, sizeof(cp->addr));
3289 } else {
3290 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3291 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3292 MGMT_STATUS_INVALID_PARAMS);
3293 }
3294
3295 hci_dev_unlock(hdev);
3296 return err;
3297 }
3298
3299 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3300 void *data, u16 len)
3301 {
3302 struct mgmt_cp_remove_remote_oob_data *cp = data;
3303 u8 status;
3304 int err;
3305
3306 BT_DBG("%s", hdev->name);
3307
3308 hci_dev_lock(hdev);
3309
3310 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3311 if (err < 0)
3312 status = MGMT_STATUS_INVALID_PARAMS;
3313 else
3314 status = MGMT_STATUS_SUCCESS;
3315
3316 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3317 status, &cp->addr, sizeof(cp->addr));
3318
3319 hci_dev_unlock(hdev);
3320 return err;
3321 }
3322
3323 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3324 {
3325 struct pending_cmd *cmd;
3326 u8 type;
3327 int err;
3328
3329 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3330
3331 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3332 if (!cmd)
3333 return -ENOENT;
3334
3335 type = hdev->discovery.type;
3336
3337 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3338 &type, sizeof(type));
3339 mgmt_pending_remove(cmd);
3340
3341 return err;
3342 }
3343
3344 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3345 {
3346 BT_DBG("status %d", status);
3347
3348 if (status) {
3349 hci_dev_lock(hdev);
3350 mgmt_start_discovery_failed(hdev, status);
3351 hci_dev_unlock(hdev);
3352 return;
3353 }
3354
3355 hci_dev_lock(hdev);
3356 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3357 hci_dev_unlock(hdev);
3358
3359 switch (hdev->discovery.type) {
3360 case DISCOV_TYPE_LE:
3361 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3362 DISCOV_LE_TIMEOUT);
3363 break;
3364
3365 case DISCOV_TYPE_INTERLEAVED:
3366 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3367 DISCOV_INTERLEAVED_TIMEOUT);
3368 break;
3369
3370 case DISCOV_TYPE_BREDR:
3371 break;
3372
3373 default:
3374 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3375 }
3376 }
3377
/* Handle MGMT_OP_START_DISCOVERY: build and submit the HCI request that
 * starts BR/EDR inquiry, LE active scanning, or both (interleaved),
 * depending on the requested discovery type.  The reply is deferred until
 * start_discovery_complete() runs.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                           void *data, u16 len)
{
        struct mgmt_cp_start_discovery *cp = data;
        struct pending_cmd *cmd;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_cp_inquiry inq_cp;
        struct hci_request req;
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        u8 status, own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        if (!hdev_is_powered(hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                 MGMT_STATUS_NOT_POWERED);
                goto failed;
        }

        /* Periodic inquiry and discovery cannot run at the same time */
        if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                 MGMT_STATUS_BUSY);
                goto failed;
        }

        /* Only one discovery may be active or starting at a time */
        if (hdev->discovery.state != DISCOVERY_STOPPED) {
                err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                 MGMT_STATUS_BUSY);
                goto failed;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }

        hdev->discovery.type = cp->type;

        hci_req_init(&req, hdev);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                status = mgmt_bredr_support(hdev);
                if (status) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         status);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                if (test_bit(HCI_INQUIRY, &hdev->flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_BUSY);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                /* Discard stale results from any previous inquiry */
                hci_inquiry_cache_flush(hdev);

                memset(&inq_cp, 0, sizeof(inq_cp));
                memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
                inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
                break;

        case DISCOV_TYPE_LE:
        case DISCOV_TYPE_INTERLEAVED:
                status = mgmt_le_support(hdev);
                if (status) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         status);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                /* Interleaved discovery needs BR/EDR on top of LE */
                if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
                    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_NOT_SUPPORTED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                /* Cannot scan while advertising */
                if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_REJECTED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                /* If controller is scanning, it means the background scanning
                 * is running. Thus, we should temporarily stop it in order to
                 * set the discovery scanning parameters.
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                memset(&param_cp, 0, sizeof(param_cp));

                /* All active scans will be done with either a resolvable
                 * private address (when privacy feature has been enabled)
                 * or unresolvable private address.
                 */
                err = hci_update_random_address(&req, true, &own_addr_type);
                if (err < 0) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_FAILED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }

                param_cp.type = LE_SCAN_ACTIVE;
                param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
                param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
                param_cp.own_address_type = own_addr_type;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
                break;

        default:
                err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                 MGMT_STATUS_INVALID_PARAMS);
                mgmt_pending_remove(cmd);
                goto failed;
        }

        err = hci_req_run(&req, start_discovery_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
        else
                hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
        hci_dev_unlock(hdev);
        return err;
}
3526
3527 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3528 {
3529 struct pending_cmd *cmd;
3530 int err;
3531
3532 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3533 if (!cmd)
3534 return -ENOENT;
3535
3536 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3537 &hdev->discovery.type, sizeof(hdev->discovery.type));
3538 mgmt_pending_remove(cmd);
3539
3540 return err;
3541 }
3542
3543 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3544 {
3545 BT_DBG("status %d", status);
3546
3547 hci_dev_lock(hdev);
3548
3549 if (status) {
3550 mgmt_stop_discovery_failed(hdev, status);
3551 goto unlock;
3552 }
3553
3554 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3555
3556 unlock:
3557 hci_dev_unlock(hdev);
3558 }
3559
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery by cancelling
 * the inquiry, disabling the LE scan, or cancelling a remote name request,
 * depending on the current discovery state.  The reply is deferred via
 * stop_discovery_complete() unless it can be answered immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                          u16 len)
{
        struct mgmt_cp_stop_discovery *mgmt_cp = data;
        struct pending_cmd *cmd;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        if (!hci_discovery_active(hdev)) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                   MGMT_STATUS_REJECTED, &mgmt_cp->type,
                                   sizeof(mgmt_cp->type));
                goto unlock;
        }

        /* The type given must match the discovery that is running */
        if (hdev->discovery.type != mgmt_cp->type) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
                                   sizeof(mgmt_cp->type));
                goto unlock;
        }

        cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }

        hci_req_init(&req, hdev);

        switch (hdev->discovery.state) {
        case DISCOVERY_FINDING:
                /* HCI_INQUIRY distinguishes a BR/EDR inquiry from an LE
                 * scan phase of interleaved discovery.
                 */
                if (test_bit(HCI_INQUIRY, &hdev->flags)) {
                        hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
                } else {
                        /* Stop the timer that would disable the scan, then
                         * disable it explicitly now.
                         */
                        cancel_delayed_work(&hdev->le_scan_disable);

                        hci_req_add_le_scan_disable(&req);
                }

                break;

        case DISCOVERY_RESOLVING:
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                /* No name request pending after all: complete right away */
                if (!e) {
                        mgmt_pending_remove(cmd);
                        err = cmd_complete(sk, hdev->id,
                                           MGMT_OP_STOP_DISCOVERY, 0,
                                           &mgmt_cp->type,
                                           sizeof(mgmt_cp->type));
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                        goto unlock;
                }

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);

                break;

        default:
                BT_DBG("unknown discovery state %u", hdev->discovery.state);

                mgmt_pending_remove(cmd);
                err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
                                   MGMT_STATUS_FAILED, &mgmt_cp->type,
                                   sizeof(mgmt_cp->type));
                goto unlock;
        }

        err = hci_req_run(&req, stop_discovery_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
        else
                hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
        hci_dev_unlock(hdev);
        return err;
}
3647
3648 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3649 u16 len)
3650 {
3651 struct mgmt_cp_confirm_name *cp = data;
3652 struct inquiry_entry *e;
3653 int err;
3654
3655 BT_DBG("%s", hdev->name);
3656
3657 hci_dev_lock(hdev);
3658
3659 if (!hci_discovery_active(hdev)) {
3660 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3661 MGMT_STATUS_FAILED, &cp->addr,
3662 sizeof(cp->addr));
3663 goto failed;
3664 }
3665
3666 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3667 if (!e) {
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3669 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3670 sizeof(cp->addr));
3671 goto failed;
3672 }
3673
3674 if (cp->name_known) {
3675 e->name_state = NAME_KNOWN;
3676 list_del(&e->list);
3677 } else {
3678 e->name_state = NAME_NEEDED;
3679 hci_inquiry_cache_update_resolve(hdev, e);
3680 }
3681
3682 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3683 sizeof(cp->addr));
3684
3685 failed:
3686 hci_dev_unlock(hdev);
3687 return err;
3688 }
3689
3690 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3691 u16 len)
3692 {
3693 struct mgmt_cp_block_device *cp = data;
3694 u8 status;
3695 int err;
3696
3697 BT_DBG("%s", hdev->name);
3698
3699 if (!bdaddr_type_is_valid(cp->addr.type))
3700 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3701 MGMT_STATUS_INVALID_PARAMS,
3702 &cp->addr, sizeof(cp->addr));
3703
3704 hci_dev_lock(hdev);
3705
3706 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3707 if (err < 0)
3708 status = MGMT_STATUS_FAILED;
3709 else
3710 status = MGMT_STATUS_SUCCESS;
3711
3712 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3713 &cp->addr, sizeof(cp->addr));
3714
3715 hci_dev_unlock(hdev);
3716
3717 return err;
3718 }
3719
3720 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3721 u16 len)
3722 {
3723 struct mgmt_cp_unblock_device *cp = data;
3724 u8 status;
3725 int err;
3726
3727 BT_DBG("%s", hdev->name);
3728
3729 if (!bdaddr_type_is_valid(cp->addr.type))
3730 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3731 MGMT_STATUS_INVALID_PARAMS,
3732 &cp->addr, sizeof(cp->addr));
3733
3734 hci_dev_lock(hdev);
3735
3736 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3737 if (err < 0)
3738 status = MGMT_STATUS_INVALID_PARAMS;
3739 else
3740 status = MGMT_STATUS_SUCCESS;
3741
3742 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3743 &cp->addr, sizeof(cp->addr));
3744
3745 hci_dev_unlock(hdev);
3746
3747 return err;
3748 }
3749
3750 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3751 u16 len)
3752 {
3753 struct mgmt_cp_set_device_id *cp = data;
3754 struct hci_request req;
3755 int err;
3756 __u16 source;
3757
3758 BT_DBG("%s", hdev->name);
3759
3760 source = __le16_to_cpu(cp->source);
3761
3762 if (source > 0x0002)
3763 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3764 MGMT_STATUS_INVALID_PARAMS);
3765
3766 hci_dev_lock(hdev);
3767
3768 hdev->devid_source = source;
3769 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3770 hdev->devid_product = __le16_to_cpu(cp->product);
3771 hdev->devid_version = __le16_to_cpu(cp->version);
3772
3773 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3774
3775 hci_req_init(&req, hdev);
3776 update_eir(&req);
3777 hci_req_run(&req, NULL);
3778
3779 hci_dev_unlock(hdev);
3780
3781 return err;
3782 }
3783
3784 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3785 {
3786 struct cmd_lookup match = { NULL, hdev };
3787
3788 if (status) {
3789 u8 mgmt_err = mgmt_status(status);
3790
3791 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3792 cmd_status_rsp, &mgmt_err);
3793 return;
3794 }
3795
3796 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3797 &match);
3798
3799 new_settings(hdev, match.sk);
3800
3801 if (match.sk)
3802 sock_put(match.sk);
3803 }
3804
/* Set Advertising management command.
 *
 * Toggles LE advertising. When the adapter is powered down, the value
 * is unchanged, or an LE connection exists, only the setting flag is
 * toggled and a response is sent immediately; otherwise the HCI
 * commands are queued and the reply comes from
 * set_advertising_complete.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support and LE to be enabled */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising or LE toggle may be in flight at once */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3882
3883 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3884 void *data, u16 len)
3885 {
3886 struct mgmt_cp_set_static_address *cp = data;
3887 int err;
3888
3889 BT_DBG("%s", hdev->name);
3890
3891 if (!lmp_le_capable(hdev))
3892 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3893 MGMT_STATUS_NOT_SUPPORTED);
3894
3895 if (hdev_is_powered(hdev))
3896 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3897 MGMT_STATUS_REJECTED);
3898
3899 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3900 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3901 return cmd_status(sk, hdev->id,
3902 MGMT_OP_SET_STATIC_ADDRESS,
3903 MGMT_STATUS_INVALID_PARAMS);
3904
3905 /* Two most significant bits shall be set */
3906 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3907 return cmd_status(sk, hdev->id,
3908 MGMT_OP_SET_STATIC_ADDRESS,
3909 MGMT_STATUS_INVALID_PARAMS);
3910 }
3911
3912 hci_dev_lock(hdev);
3913
3914 bacpy(&hdev->static_addr, &cp->bdaddr);
3915
3916 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3917
3918 hci_dev_unlock(hdev);
3919
3920 return err;
3921 }
3922
3923 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3924 void *data, u16 len)
3925 {
3926 struct mgmt_cp_set_scan_params *cp = data;
3927 __u16 interval, window;
3928 int err;
3929
3930 BT_DBG("%s", hdev->name);
3931
3932 if (!lmp_le_capable(hdev))
3933 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3934 MGMT_STATUS_NOT_SUPPORTED);
3935
3936 interval = __le16_to_cpu(cp->interval);
3937
3938 if (interval < 0x0004 || interval > 0x4000)
3939 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3940 MGMT_STATUS_INVALID_PARAMS);
3941
3942 window = __le16_to_cpu(cp->window);
3943
3944 if (window < 0x0004 || window > 0x4000)
3945 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3946 MGMT_STATUS_INVALID_PARAMS);
3947
3948 if (window > interval)
3949 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3950 MGMT_STATUS_INVALID_PARAMS);
3951
3952 hci_dev_lock(hdev);
3953
3954 hdev->le_scan_interval = interval;
3955 hdev->le_scan_window = window;
3956
3957 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3958
3959 /* If background scan is running, restart it so new parameters are
3960 * loaded.
3961 */
3962 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3963 hdev->discovery.state == DISCOVERY_STOPPED) {
3964 struct hci_request req;
3965
3966 hci_req_init(&req, hdev);
3967
3968 hci_req_add_le_scan_disable(&req);
3969 hci_req_add_le_passive_scan(&req);
3970
3971 hci_req_run(&req, NULL);
3972 }
3973
3974 hci_dev_unlock(hdev);
3975
3976 return err;
3977 }
3978
3979 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3980 {
3981 struct pending_cmd *cmd;
3982
3983 BT_DBG("status 0x%02x", status);
3984
3985 hci_dev_lock(hdev);
3986
3987 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3988 if (!cmd)
3989 goto unlock;
3990
3991 if (status) {
3992 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3993 mgmt_status(status));
3994 } else {
3995 struct mgmt_mode *cp = cmd->param;
3996
3997 if (cp->val)
3998 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3999 else
4000 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4001
4002 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4003 new_settings(hdev, cmd->sk);
4004 }
4005
4006 mgmt_pending_remove(cmd);
4007
4008 unlock:
4009 hci_dev_unlock(hdev);
4010 }
4011
/* Set Fast Connectable management command.
 *
 * Adjusts the BR/EDR page scan parameters for faster connection
 * establishment. Requires BR/EDR enabled, controller version >= 1.2,
 * the adapter powered, and the connectable setting on. The flag
 * itself is only committed in fast_connectable_complete.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4076
4077 static void set_bredr_scan(struct hci_request *req)
4078 {
4079 struct hci_dev *hdev = req->hdev;
4080 u8 scan = 0;
4081
4082 /* Ensure that fast connectable is disabled. This function will
4083 * not do anything if the page scan parameters are already what
4084 * they should be.
4085 */
4086 write_fast_connectable(req, false);
4087
4088 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4089 scan |= SCAN_PAGE;
4090 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4091 scan |= SCAN_INQUIRY;
4092
4093 if (scan)
4094 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4095 }
4096
4097 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4098 {
4099 struct pending_cmd *cmd;
4100
4101 BT_DBG("status 0x%02x", status);
4102
4103 hci_dev_lock(hdev);
4104
4105 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4106 if (!cmd)
4107 goto unlock;
4108
4109 if (status) {
4110 u8 mgmt_err = mgmt_status(status);
4111
4112 /* We need to restore the flag if related HCI commands
4113 * failed.
4114 */
4115 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4116
4117 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4118 } else {
4119 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4120 new_settings(hdev, cmd->sk);
4121 }
4122
4123 mgmt_pending_remove(cmd);
4124
4125 unlock:
4126 hci_dev_unlock(hdev);
4127 }
4128
/* Set BR/EDR management command.
 *
 * Enables or disables BR/EDR on a dual-mode controller. Disabling is
 * only allowed while powered off (related flags are cleared then);
 * when powered, enabling flips the flag optimistically, updates scan
 * mode and advertising data, and the reply comes from
 * set_bredr_complete.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just echo the settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR invalidates all BR/EDR-only
		 * settings, so clear them along with it.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4218
/* Set Secure Connections management command.
 *
 * val 0x00 disables SC, 0x01 enables it, 0x02 enables SC-only mode.
 * While powered off only the flags are toggled; while powered, a
 * Write SC Support command is sent and the SC_ONLY flag is updated
 * once the command has been queued successfully.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows testing SC on controllers that do not
	 * advertise the capability.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to change: both the enabled and the SC-only state
	 * already match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4306
4307 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4308 void *data, u16 len)
4309 {
4310 struct mgmt_mode *cp = data;
4311 bool changed;
4312 int err;
4313
4314 BT_DBG("request for %s", hdev->name);
4315
4316 if (cp->val != 0x00 && cp->val != 0x01)
4317 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4318 MGMT_STATUS_INVALID_PARAMS);
4319
4320 hci_dev_lock(hdev);
4321
4322 if (cp->val)
4323 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4324 else
4325 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4326
4327 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4328 if (err < 0)
4329 goto unlock;
4330
4331 if (changed)
4332 err = new_settings(hdev, sk);
4333
4334 unlock:
4335 hci_dev_unlock(hdev);
4336 return err;
4337 }
4338
4339 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4340 u16 len)
4341 {
4342 struct mgmt_cp_set_privacy *cp = cp_data;
4343 bool changed;
4344 int err;
4345
4346 BT_DBG("request for %s", hdev->name);
4347
4348 if (!lmp_le_capable(hdev))
4349 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4350 MGMT_STATUS_NOT_SUPPORTED);
4351
4352 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4353 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4354 MGMT_STATUS_INVALID_PARAMS);
4355
4356 if (hdev_is_powered(hdev))
4357 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4358 MGMT_STATUS_REJECTED);
4359
4360 hci_dev_lock(hdev);
4361
4362 /* If user space supports this command it is also expected to
4363 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4364 */
4365 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4366
4367 if (cp->privacy) {
4368 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4369 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4370 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4371 } else {
4372 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4373 memset(hdev->irk, 0, sizeof(hdev->irk));
4374 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4375 }
4376
4377 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4378 if (err < 0)
4379 goto unlock;
4380
4381 if (changed)
4382 err = new_settings(hdev, sk);
4383
4384 unlock:
4385 hci_dev_unlock(hdev);
4386 return err;
4387 }
4388
4389 static bool irk_is_valid(struct mgmt_irk_info *irk)
4390 {
4391 switch (irk->addr.type) {
4392 case BDADDR_LE_PUBLIC:
4393 return true;
4394
4395 case BDADDR_LE_RANDOM:
4396 /* Two most significant bits shall be set */
4397 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4398 return false;
4399 return true;
4400 }
4401
4402 return false;
4403 }
4404
4405 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4406 u16 len)
4407 {
4408 struct mgmt_cp_load_irks *cp = cp_data;
4409 u16 irk_count, expected_len;
4410 int i, err;
4411
4412 BT_DBG("request for %s", hdev->name);
4413
4414 if (!lmp_le_capable(hdev))
4415 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4416 MGMT_STATUS_NOT_SUPPORTED);
4417
4418 irk_count = __le16_to_cpu(cp->irk_count);
4419
4420 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4421 if (expected_len != len) {
4422 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4423 len, expected_len);
4424 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4425 MGMT_STATUS_INVALID_PARAMS);
4426 }
4427
4428 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4429
4430 for (i = 0; i < irk_count; i++) {
4431 struct mgmt_irk_info *key = &cp->irks[i];
4432
4433 if (!irk_is_valid(key))
4434 return cmd_status(sk, hdev->id,
4435 MGMT_OP_LOAD_IRKS,
4436 MGMT_STATUS_INVALID_PARAMS);
4437 }
4438
4439 hci_dev_lock(hdev);
4440
4441 hci_smp_irks_clear(hdev);
4442
4443 for (i = 0; i < irk_count; i++) {
4444 struct mgmt_irk_info *irk = &cp->irks[i];
4445 u8 addr_type;
4446
4447 if (irk->addr.type == BDADDR_LE_PUBLIC)
4448 addr_type = ADDR_LE_DEV_PUBLIC;
4449 else
4450 addr_type = ADDR_LE_DEV_RANDOM;
4451
4452 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4453 BDADDR_ANY);
4454 }
4455
4456 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4457
4458 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4459
4460 hci_dev_unlock(hdev);
4461
4462 return err;
4463 }
4464
4465 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4466 {
4467 if (key->master != 0x00 && key->master != 0x01)
4468 return false;
4469
4470 switch (key->addr.type) {
4471 case BDADDR_LE_PUBLIC:
4472 return true;
4473
4474 case BDADDR_LE_RANDOM:
4475 /* Two most significant bits shall be set */
4476 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4477 return false;
4478 return true;
4479 }
4480
4481 return false;
4482 }
4483
4484 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4485 void *cp_data, u16 len)
4486 {
4487 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4488 u16 key_count, expected_len;
4489 int i, err;
4490
4491 BT_DBG("request for %s", hdev->name);
4492
4493 if (!lmp_le_capable(hdev))
4494 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4495 MGMT_STATUS_NOT_SUPPORTED);
4496
4497 key_count = __le16_to_cpu(cp->key_count);
4498
4499 expected_len = sizeof(*cp) + key_count *
4500 sizeof(struct mgmt_ltk_info);
4501 if (expected_len != len) {
4502 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4503 len, expected_len);
4504 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4505 MGMT_STATUS_INVALID_PARAMS);
4506 }
4507
4508 BT_DBG("%s key_count %u", hdev->name, key_count);
4509
4510 for (i = 0; i < key_count; i++) {
4511 struct mgmt_ltk_info *key = &cp->keys[i];
4512
4513 if (!ltk_is_valid(key))
4514 return cmd_status(sk, hdev->id,
4515 MGMT_OP_LOAD_LONG_TERM_KEYS,
4516 MGMT_STATUS_INVALID_PARAMS);
4517 }
4518
4519 hci_dev_lock(hdev);
4520
4521 hci_smp_ltks_clear(hdev);
4522
4523 for (i = 0; i < key_count; i++) {
4524 struct mgmt_ltk_info *key = &cp->keys[i];
4525 u8 type, addr_type;
4526
4527 if (key->addr.type == BDADDR_LE_PUBLIC)
4528 addr_type = ADDR_LE_DEV_PUBLIC;
4529 else
4530 addr_type = ADDR_LE_DEV_RANDOM;
4531
4532 if (key->master)
4533 type = HCI_SMP_LTK;
4534 else
4535 type = HCI_SMP_LTK_SLAVE;
4536
4537 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4538 key->type, key->val, key->enc_size, key->ediv,
4539 key->rand);
4540 }
4541
4542 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4543 NULL, 0);
4544
4545 hci_dev_unlock(hdev);
4546
4547 return err;
4548 }
4549
/* Dispatch table for management commands, indexed by opcode. For
 * var_len commands data_len is the minimum acceptable payload size;
 * fixed-size commands must match data_len exactly (enforced in
 * mgmt_control).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4606
4607
/* Entry point for management commands received on a control socket.
 *
 * Parses the mgmt_hdr, resolves the target controller (if any),
 * validates the opcode and payload length against mgmt_handlers and
 * dispatches to the per-command handler. Returns the number of
 * consumed bytes on success or a negative error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user
		 * channel are not visible to the management interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global commands; all
	 * others require a controller index.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly, variable-length
	 * commands must carry at least the minimum.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4700
4701 void mgmt_index_added(struct hci_dev *hdev)
4702 {
4703 if (hdev->dev_type != HCI_BREDR)
4704 return;
4705
4706 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4707 }
4708
4709 void mgmt_index_removed(struct hci_dev *hdev)
4710 {
4711 u8 status = MGMT_STATUS_INVALID_INDEX;
4712
4713 if (hdev->dev_type != HCI_BREDR)
4714 return;
4715
4716 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4717
4718 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4719 }
4720
4721 /* This function requires the caller holds hdev->lock */
4722 static void restart_le_auto_conns(struct hci_dev *hdev)
4723 {
4724 struct hci_conn_params *p;
4725
4726 list_for_each_entry(p, &hdev->le_conn_params, list) {
4727 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4728 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4729 }
4730 }
4731
4732 static void powered_complete(struct hci_dev *hdev, u8 status)
4733 {
4734 struct cmd_lookup match = { NULL, hdev };
4735
4736 BT_DBG("status 0x%02x", status);
4737
4738 hci_dev_lock(hdev);
4739
4740 restart_le_auto_conns(hdev);
4741
4742 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4743
4744 new_settings(hdev, match.sk);
4745
4746 hci_dev_unlock(hdev);
4747
4748 if (match.sk)
4749 sock_put(match.sk);
4750 }
4751
/* Bring the controller's HCI state in sync with the management
 * settings after power-on: SSP mode, LE host support, advertising
 * data, authentication and BR/EDR scan/class/name/EIR. Returns the
 * result of hci_req_run (completion handled by powered_complete).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication requirement with the link security
	 * setting.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4811
/* Notify the mgmt layer about a controller power state change.
 * Returns 0 when mgmt is not in use or the response is deferred to
 * powered_complete(), otherwise the result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If powered_update_hci() managed to queue commands,
		 * the pending Set Powered commands will be answered
		 * from its completion callback instead of here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: answer pending Set Powered commands and fail
	 * all remaining pending commands with "not powered".
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Announce an all-zero class of device if the stored class was
	 * non-zero before powering off.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4846
4847 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4848 {
4849 struct pending_cmd *cmd;
4850 u8 status;
4851
4852 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4853 if (!cmd)
4854 return;
4855
4856 if (err == -ERFKILL)
4857 status = MGMT_STATUS_RFKILLED;
4858 else
4859 status = MGMT_STATUS_FAILED;
4860
4861 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4862
4863 mgmt_pending_remove(cmd);
4864 }
4865
/* Called when the discoverable timeout expires: clear the
 * discoverable flags, restore the controller state and notify user
 * space of the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	/* Write SCAN_PAGE only, i.e. disable inquiry scan while
	 * keeping the device connectable on BR/EDR.
	 */
	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Refresh class of device and advertising data now that the
	 * discoverable flags are cleared.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4896
4897 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4898 {
4899 bool changed;
4900
4901 /* Nothing needed here if there's a pending command since that
4902 * commands request completion callback takes care of everything
4903 * necessary.
4904 */
4905 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4906 return;
4907
4908 /* Powering off may clear the scan mode - don't let that interfere */
4909 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4910 return;
4911
4912 if (discoverable) {
4913 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4914 } else {
4915 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4916 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4917 }
4918
4919 if (changed) {
4920 struct hci_request req;
4921
4922 /* In case this change in discoverable was triggered by
4923 * a disabling of connectable there could be a need to
4924 * update the advertising flags.
4925 */
4926 hci_req_init(&req, hdev);
4927 update_adv_data(&req);
4928 hci_req_run(&req, NULL);
4929
4930 new_settings(hdev, NULL);
4931 }
4932 }
4933
4934 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4935 {
4936 bool changed;
4937
4938 /* Nothing needed here if there's a pending command since that
4939 * commands request completion callback takes care of everything
4940 * necessary.
4941 */
4942 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4943 return;
4944
4945 /* Powering off may clear the scan mode - don't let that interfere */
4946 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4947 return;
4948
4949 if (connectable)
4950 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4951 else
4952 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4953
4954 if (changed)
4955 new_settings(hdev, NULL);
4956 }
4957
4958 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4959 {
4960 /* Powering off may stop advertising - don't let that interfere */
4961 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4962 return;
4963
4964 if (advertising)
4965 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4966 else
4967 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4968 }
4969
4970 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4971 {
4972 u8 mgmt_err = mgmt_status(status);
4973
4974 if (scan & SCAN_PAGE)
4975 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4976 cmd_status_rsp, &mgmt_err);
4977
4978 if (scan & SCAN_INQUIRY)
4979 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4980 cmd_status_rsp, &mgmt_err);
4981 }
4982
4983 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4984 bool persistent)
4985 {
4986 struct mgmt_ev_new_link_key ev;
4987
4988 memset(&ev, 0, sizeof(ev));
4989
4990 ev.store_hint = persistent;
4991 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4992 ev.key.addr.type = BDADDR_BREDR;
4993 ev.key.type = key->type;
4994 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4995 ev.key.pin_len = key->pin_len;
4996
4997 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4998 }
4999
/* Send a New Long Term Key event so user space can decide whether to
 * persist the key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* ev was zeroed above, so master stays 0 for non-master keys */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5037
5038 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5039 {
5040 struct mgmt_ev_new_irk ev;
5041
5042 memset(&ev, 0, sizeof(ev));
5043
5044 /* For identity resolving keys from devices that are already
5045 * using a public address or static random address, do not
5046 * ask for storing this key. The identity resolving key really
5047 * is only mandatory for devices using resovlable random
5048 * addresses.
5049 *
5050 * Storing all identity resolving keys has the downside that
5051 * they will be also loaded on next boot of they system. More
5052 * identity resolving keys, means more time during scanning is
5053 * needed to actually resolve these addresses.
5054 */
5055 if (bacmp(&irk->rpa, BDADDR_ANY))
5056 ev.store_hint = 0x01;
5057 else
5058 ev.store_hint = 0x00;
5059
5060 bacpy(&ev.rpa, &irk->rpa);
5061 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5062 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5063 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5064
5065 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5066 }
5067
5068 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5069 u8 data_len)
5070 {
5071 eir[eir_len++] = sizeof(type) + data_len;
5072 eir[eir_len++] = type;
5073 memcpy(&eir[eir_len], data, data_len);
5074 eir_len += data_len;
5075
5076 return eir_len;
5077 }
5078
/* Send a Device Connected event, appending the remote name and class
 * of device as EIR fields when available.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Fixed event header plus variable EIR data; name_len is a u8
	 * and the CoD field adds 5 bytes, so 512 is always enough.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5105
5106 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5107 {
5108 struct mgmt_cp_disconnect *cp = cmd->param;
5109 struct sock **sk = data;
5110 struct mgmt_rp_disconnect rp;
5111
5112 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5113 rp.addr.type = cp->addr.type;
5114
5115 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5116 sizeof(rp));
5117
5118 *sk = cmd->sk;
5119 sock_hold(*sk);
5120
5121 mgmt_pending_remove(cmd);
5122 }
5123
/* mgmt_pending_foreach() callback: run device_unpaired() for the
 * address in the command and complete the pending Unpair Device
 * command with success.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5140
/* Handle a terminated connection: possibly expedite a pending power
 * off, answer pending Disconnect/Unpair commands and send a Device
 * Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* The disconnect event is only of interest when mgmt knew
	 * about the connection in the first place.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores a referenced socket in sk */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5182
5183 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5184 u8 link_type, u8 addr_type, u8 status)
5185 {
5186 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5187 struct mgmt_cp_disconnect *cp;
5188 struct mgmt_rp_disconnect rp;
5189 struct pending_cmd *cmd;
5190
5191 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5192 hdev);
5193
5194 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5195 if (!cmd)
5196 return;
5197
5198 cp = cmd->param;
5199
5200 if (bacmp(bdaddr, &cp->addr.bdaddr))
5201 return;
5202
5203 if (cp->addr.type != bdaddr_type)
5204 return;
5205
5206 bacpy(&rp.addr.bdaddr, bdaddr);
5207 rp.addr.type = bdaddr_type;
5208
5209 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5210 mgmt_status(status), &rp, sizeof(rp));
5211
5212 mgmt_pending_remove(cmd);
5213 }
5214
/* Handle a failed outgoing connection attempt: possibly expedite a
 * pending power off, then send a Connect Failed event.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5240
5241 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5242 {
5243 struct mgmt_ev_pin_code_request ev;
5244
5245 bacpy(&ev.addr.bdaddr, bdaddr);
5246 ev.addr.type = BDADDR_BREDR;
5247 ev.secure = secure;
5248
5249 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5250 }
5251
5252 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5253 u8 status)
5254 {
5255 struct pending_cmd *cmd;
5256 struct mgmt_rp_pin_code_reply rp;
5257
5258 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5259 if (!cmd)
5260 return;
5261
5262 bacpy(&rp.addr.bdaddr, bdaddr);
5263 rp.addr.type = BDADDR_BREDR;
5264
5265 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5266 mgmt_status(status), &rp, sizeof(rp));
5267
5268 mgmt_pending_remove(cmd);
5269 }
5270
5271 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5272 u8 status)
5273 {
5274 struct pending_cmd *cmd;
5275 struct mgmt_rp_pin_code_reply rp;
5276
5277 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5278 if (!cmd)
5279 return;
5280
5281 bacpy(&rp.addr.bdaddr, bdaddr);
5282 rp.addr.type = BDADDR_BREDR;
5283
5284 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5285 mgmt_status(status), &rp, sizeof(rp));
5286
5287 mgmt_pending_remove(cmd);
5288 }
5289
5290 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5291 u8 link_type, u8 addr_type, __le32 value,
5292 u8 confirm_hint)
5293 {
5294 struct mgmt_ev_user_confirm_request ev;
5295
5296 BT_DBG("%s", hdev->name);
5297
5298 bacpy(&ev.addr.bdaddr, bdaddr);
5299 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5300 ev.confirm_hint = confirm_hint;
5301 ev.value = value;
5302
5303 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5304 NULL);
5305 }
5306
5307 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5308 u8 link_type, u8 addr_type)
5309 {
5310 struct mgmt_ev_user_passkey_request ev;
5311
5312 BT_DBG("%s", hdev->name);
5313
5314 bacpy(&ev.addr.bdaddr, bdaddr);
5315 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5316
5317 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5318 NULL);
5319 }
5320
5321 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5322 u8 link_type, u8 addr_type, u8 status,
5323 u8 opcode)
5324 {
5325 struct pending_cmd *cmd;
5326 struct mgmt_rp_user_confirm_reply rp;
5327 int err;
5328
5329 cmd = mgmt_pending_find(opcode, hdev);
5330 if (!cmd)
5331 return -ENOENT;
5332
5333 bacpy(&rp.addr.bdaddr, bdaddr);
5334 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5335 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5336 &rp, sizeof(rp));
5337
5338 mgmt_pending_remove(cmd);
5339
5340 return err;
5341 }
5342
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5349
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5357
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5364
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5372
5373 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5374 u8 link_type, u8 addr_type, u32 passkey,
5375 u8 entered)
5376 {
5377 struct mgmt_ev_passkey_notify ev;
5378
5379 BT_DBG("%s", hdev->name);
5380
5381 bacpy(&ev.addr.bdaddr, bdaddr);
5382 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5383 ev.passkey = __cpu_to_le32(passkey);
5384 ev.entered = entered;
5385
5386 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5387 }
5388
5389 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5390 u8 addr_type, u8 status)
5391 {
5392 struct mgmt_ev_auth_failed ev;
5393
5394 bacpy(&ev.addr.bdaddr, bdaddr);
5395 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5396 ev.status = mgmt_status(status);
5397
5398 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5399 }
5400
/* Completion handler for Write Auth Enable: sync the link security
 * mgmt flag with the controller state and answer pending Set Link
 * Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	/* Only broadcast New Settings when the flag actually flipped */
	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5429
5430 static void clear_eir(struct hci_request *req)
5431 {
5432 struct hci_dev *hdev = req->hdev;
5433 struct hci_cp_write_eir cp;
5434
5435 if (!lmp_ext_inq_capable(hdev))
5436 return;
5437
5438 memset(hdev->eir, 0, sizeof(hdev->eir));
5439
5440 memset(&cp, 0, sizeof(cp));
5441
5442 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5443 }
5444
/* Completion handler for Write SSP Mode: sync the SSP (and dependent
 * High Speed) mgmt flags, answer pending Set SSP commands and update
 * or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back the flags that the
		 * command handler set optimistically; HS depends on
		 * SSP, so it is cleared alongside.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; report a
		 * change when either flag was previously set.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR data is only valid while SSP is enabled */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5493
/* Completion handler for enabling/disabling Secure Connections: sync
 * the SC flags and answer pending Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the optimistically set flags on failure;
		 * SC-only mode cannot stand without SC itself.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5530
5531 static void sk_lookup(struct pending_cmd *cmd, void *data)
5532 {
5533 struct cmd_lookup *match = data;
5534
5535 if (match->sk == NULL) {
5536 match->sk = cmd->sk;
5537 sock_hold(match->sk);
5538 }
5539 }
5540
/* Completion handler for a class-of-device update triggered by Set
 * Device Class, Add UUID or Remove UUID.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Look through all three command types that can trigger a
	 * class update; sk_lookup grabs a referenced socket into
	 * match.sk (released below).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* Only broadcast the new class when the write succeeded */
	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5557
/* Completion handler for a local name change. With a pending Set
 * Local Name command the stored name was already updated by the
 * command handler; without one the change came from elsewhere and the
 * name is cached here (suppressing the event during power on).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5584
/* Completion handler for Read Local OOB Data. With Secure Connections
 * enabled and 256-bit values available the extended reply (both key
 * sizes) is returned, otherwise only the 192-bit values.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended reply: P-192 and P-256 values */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply: P-192 values only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5631
/* Send a Device Found event for a discovery result, resolving private
 * addresses via the IRK store and appending a class-of-device EIR
 * field when one was provided separately.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only of interest while discovery is active */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address when the RPA can be resolved */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR data already
	 * carries one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5677
5678 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5679 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5680 {
5681 struct mgmt_ev_device_found *ev;
5682 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5683 u16 eir_len;
5684
5685 ev = (struct mgmt_ev_device_found *) buf;
5686
5687 memset(buf, 0, sizeof(buf));
5688
5689 bacpy(&ev->addr.bdaddr, bdaddr);
5690 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5691 ev->rssi = rssi;
5692
5693 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5694 name_len);
5695
5696 ev->eir_len = cpu_to_le16(eir_len);
5697
5698 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5699 }
5700
5701 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5702 {
5703 struct mgmt_ev_discovering ev;
5704 struct pending_cmd *cmd;
5705
5706 BT_DBG("%s discovering %u", hdev->name, discovering);
5707
5708 if (discovering)
5709 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5710 else
5711 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5712
5713 if (cmd != NULL) {
5714 u8 type = hdev->discovery.type;
5715
5716 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5717 sizeof(type));
5718 mgmt_pending_remove(cmd);
5719 }
5720
5721 memset(&ev, 0, sizeof(ev));
5722 ev.type = hdev->discovery.type;
5723 ev.discovering = discovering;
5724
5725 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5726 }
5727
5728 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5729 {
5730 struct pending_cmd *cmd;
5731 struct mgmt_ev_device_blocked ev;
5732
5733 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5734
5735 bacpy(&ev.addr.bdaddr, bdaddr);
5736 ev.addr.type = type;
5737
5738 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5739 cmd ? cmd->sk : NULL);
5740 }
5741
5742 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5743 {
5744 struct pending_cmd *cmd;
5745 struct mgmt_ev_device_unblocked ev;
5746
5747 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5748
5749 bacpy(&ev.addr.bdaddr, bdaddr);
5750 ev.addr.type = type;
5751
5752 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5753 cmd ? cmd->sk : NULL);
5754 }
5755
5756 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5757 {
5758 BT_DBG("%s status %u", hdev->name, status);
5759
5760 /* Clear the advertising mgmt setting if we failed to re-enable it */
5761 if (status) {
5762 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5763 new_settings(hdev, NULL);
5764 }
5765 }
5766
5767 void mgmt_reenable_advertising(struct hci_dev *hdev)
5768 {
5769 struct hci_request req;
5770
5771 if (hci_conn_num(hdev, LE_LINK) > 0)
5772 return;
5773
5774 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5775 return;
5776
5777 hci_req_init(&req, hdev);
5778 enable_advertising(&req);
5779
5780 /* If this fails we have no option but to let user space know
5781 * that we've disabled advertising.
5782 */
5783 if (hci_req_run(&req, adv_enable_complete) < 0) {
5784 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5785 new_settings(hdev, NULL);
5786 }
5787 }