/* net/bluetooth/mgmt.c - Bluetooth HCI Management interface */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Management command opcodes implemented by this kernel; reported to
 * user space through the Read Management Supported Commands reply
 * (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Management event opcodes this kernel can emit; reported to user
 * space alongside mgmt_commands[] in the Read Management Supported
 * Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
112
113 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
114
115 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
116 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
117
/* Book-keeping for a mgmt command whose completion is deferred until
 * the HCI operations it triggered finish; entries live on
 * hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode that created this entry */
	int index;		/* controller index (hdev->id) */
	void *param;		/* heap copy of the command parameters */
	struct sock *sk;	/* socket to answer (holds a reference) */
	void *user_data;	/* NOTE(review): not initialized by
				 * mgmt_pending_add(); callers must set it
				 * before reading it - confirm */
};
126
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code (0x00..0x3f); see mgmt_status() for lookup and the fallback
 * for out-of-range codes.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
191
192 static u8 mgmt_status(u8 hci_status)
193 {
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
196
197 return MGMT_STATUS_FAILED;
198 }
199
200 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
201 {
202 struct sk_buff *skb;
203 struct mgmt_hdr *hdr;
204 struct mgmt_ev_cmd_status *ev;
205 int err;
206
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
208
209 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 if (!skb)
211 return -ENOMEM;
212
213 hdr = (void *) skb_put(skb, sizeof(*hdr));
214
215 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
216 hdr->index = cpu_to_le16(index);
217 hdr->len = cpu_to_le16(sizeof(*ev));
218
219 ev = (void *) skb_put(skb, sizeof(*ev));
220 ev->status = status;
221 ev->opcode = cpu_to_le16(cmd);
222
223 err = sock_queue_rcv_skb(sk, skb);
224 if (err < 0)
225 kfree_skb(skb);
226
227 return err;
228 }
229
230 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
231 void *rp, size_t rp_len)
232 {
233 struct sk_buff *skb;
234 struct mgmt_hdr *hdr;
235 struct mgmt_ev_cmd_complete *ev;
236 int err;
237
238 BT_DBG("sock %p", sk);
239
240 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 if (!skb)
242 return -ENOMEM;
243
244 hdr = (void *) skb_put(skb, sizeof(*hdr));
245
246 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
247 hdr->index = cpu_to_le16(index);
248 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
249
250 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
251 ev->opcode = cpu_to_le16(cmd);
252 ev->status = status;
253
254 if (rp)
255 memcpy(ev->data, rp, rp_len);
256
257 err = sock_queue_rcv_skb(sk, skb);
258 if (err < 0)
259 kfree_skb(skb);
260
261 return err;
262 }
263
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 u16 data_len)
266 {
267 struct mgmt_rp_read_version rp;
268
269 BT_DBG("sock %p", sk);
270
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
273
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 sizeof(rp));
276 }
277
/* Read Management Supported Commands handler: reply with the tables
 * of supported command and event opcodes in little-endian order.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = __constant_cpu_to_le16(num_commands);
	rp->num_events = __constant_cpu_to_le16(num_events);

	/* Commands first, then events, in one contiguous opcode array */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
311
/* Read Controller Index List handler: report the ids of all BR/EDR
 * controllers that are neither still in setup nor bound to a user
 * channel.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation (the second pass
	 * may fill in fewer entries because of the extra filters).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids of the eligible controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
364
/* Compute the mask of settings this controller can support, derived
 * from its BR/EDR, SSP, Secure Connections and LE capabilities.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		/* Only offered on 1.2 or later controllers */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC overrides the controller capability check */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	return settings;
}
399
400 static u32 get_current_settings(struct hci_dev *hdev)
401 {
402 u32 settings = 0;
403
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
406
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
409
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
412
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
415
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
418
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
421
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
424
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
427
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
430
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
433
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
436
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
439
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
442
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
445
446 return settings;
447 }
448
449 #define PNP_INFO_SVCLASS_ID 0x1200
450
/* Append an EIR block listing the 16-bit service UUIDs of @hdev to
 * @data (at most @len bytes). PnP Information and reserved (<0x1100)
 * UUIDs are skipped; if not everything fits, the block is marked as a
 * partial (EIR_UUID16_SOME) list. Returns the new write position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte block header plus one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Emit the block header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
492
/* Append an EIR block listing the 32-bit service UUIDs of @hdev to
 * @data (at most @len bytes); marks the block EIR_UUID32_SOME when
 * truncated. Returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte block header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Emit the block header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
525
/* Append an EIR block listing the 128-bit service UUIDs of @hdev to
 * @data (at most @len bytes); marks the block EIR_UUID128_SOME when
 * truncated. Returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte block header plus one UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Emit the block header lazily, on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
558
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
560 {
561 struct pending_cmd *cmd;
562
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
565 return cmd;
566 }
567
568 return NULL;
569 }
570
/* Build LE scan response data consisting of the local name, truncated
 * and flagged EIR_NAME_SHORT when it does not fit in the AD buffer.
 * Returns the number of bytes written to @ptr.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve two bytes for the AD length/type header */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length covers the type byte plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
596
/* Queue an HCI command updating the controller's LE scan response
 * data, skipping the command when LE is disabled or the data is
 * unchanged from the cached copy.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
621
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
623 {
624 struct pending_cmd *cmd;
625
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
628 */
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
630 if (cmd) {
631 struct mgmt_mode *cp = cmd->param;
632 if (cp->val == 0x01)
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
636 } else {
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
641 }
642
643 return 0;
644 }
645
/* Build LE advertising data: the flags field (discoverable mode,
 * BR/EDR not supported) and, when the controller reported one, the
 * advertising TX power. Returns the number of bytes written to @ptr.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags AD field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
677
/* Queue an HCI command updating the controller's LE advertising data,
 * skipping the command when LE is disabled or the data is unchanged
 * from the cached copy.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
702
/* Build extended inquiry response data in @data: local name, inquiry
 * TX power, device ID and the 16/32/128-bit service UUID lists, in
 * that order. @data is HCI_MAX_EIR_LENGTH bytes and zeroed by the
 * caller (see update_eir()).
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* 9 = type byte plus four 16-bit device ID fields */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill whatever space remains with the service UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
750
/* Queue a Write Extended Inquiry Response command when EIR applies
 * (powered, controller capable, SSP enabled, service cache inactive)
 * and the freshly generated data differs from the cached copy.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Nothing to do if the controller already has this data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
779
780 static u8 get_service_classes(struct hci_dev *hdev)
781 {
782 struct bt_uuid *uuid;
783 u8 val = 0;
784
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
787
788 return val;
789 }
790
/* Queue a Write Class of Device command built from the major/minor
 * class, the service-class bits from the UUID list and the limited
 * discoverable bit; skipped when not applicable or unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited Discoverable bit in the major class octet */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Nothing to do if the controller already has this class */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
819
820 static bool get_connectable(struct hci_dev *hdev)
821 {
822 struct pending_cmd *cmd;
823
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
826 */
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
828 if (cmd) {
829 struct mgmt_mode *cp = cmd->param;
830 return cp->val;
831 }
832
833 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
834 }
835
/* Queue the HCI commands that configure and enable LE advertising,
 * choosing the advertising type and own-address type from the
 * (possibly pending) connectable setting.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* Fixed min == max advertising interval of 0x0800 */
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
863
864 static void disable_advertising(struct hci_request *req)
865 {
866 u8 enable = 0x00;
867
868 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
869 }
870
/* Delayed work: when the service cache period expires, push the real
 * EIR data and class of device out to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only run once per cache period */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
891
/* Delayed work: mark the resolvable private address as expired and,
 * when advertising with no LE links up, restart advertising so a
 * fresh RPA gets programmed into the controller.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
917
/* One-time per-controller mgmt initialization, done the first time a
 * mgmt command addresses the device: mark it mgmt-controlled and set
 * up the delayed work items mgmt relies on.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Idempotent: bail if HCI_MGMT was already set */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
933
/* Read Controller Information handler: snapshot the address, HCI
 * version, manufacturer, settings masks, class and names under the
 * device lock and send them back as the reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
963
/* Drop the socket reference taken by mgmt_pending_add() and free the
 * command tracker together with its parameter copy.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
970
971 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
972 struct hci_dev *hdev, void *data,
973 u16 len)
974 {
975 struct pending_cmd *cmd;
976
977 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
978 if (!cmd)
979 return NULL;
980
981 cmd->opcode = opcode;
982 cmd->index = hdev->id;
983
984 cmd->param = kmalloc(len, GFP_KERNEL);
985 if (!cmd->param) {
986 kfree(cmd);
987 return NULL;
988 }
989
990 if (data)
991 memcpy(cmd->param, data, len);
992
993 cmd->sk = sk;
994 sock_hold(sk);
995
996 list_add(&cmd->list, &hdev->mgmt_pending);
997
998 return cmd;
999 }
1000
1001 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1002 void (*cb)(struct pending_cmd *cmd,
1003 void *data),
1004 void *data)
1005 {
1006 struct pending_cmd *cmd, *tmp;
1007
1008 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1009 if (opcode > 0 && cmd->opcode != opcode)
1010 continue;
1011
1012 cb(cmd, data);
1013 }
1014 }
1015
/* Unlink a pending command from its controller's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1021
1022 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1023 {
1024 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1025
1026 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1027 sizeof(settings));
1028 }
1029
/* hci_req_run() callback for clean_up_hci_state(): once no
 * connections remain, schedule the actual power-off work.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0)
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
1037
/* Queue the HCI commands that quiesce the controller before powering
 * off: disable page/inquiry scan, stop advertising, stop LE scanning
 * and disconnect every connection. Returns the hci_req_run() result
 * (-ENODATA when nothing needed doing; see set_powered()).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;

		dc.handle = cpu_to_le16(conn->handle);
		/* NOTE(review): 0x15 looks like the HCI "Remote Device
		 * Terminated due to Power Off" error code - consider
		 * using the named HCI_ERROR_* constant if available.
		 */
		dc.reason = 0x15; /* Terminated due to Power Off */
		hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1068
1069 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1070 u16 len)
1071 {
1072 struct mgmt_mode *cp = data;
1073 struct pending_cmd *cmd;
1074 int err;
1075
1076 BT_DBG("request for %s", hdev->name);
1077
1078 if (cp->val != 0x00 && cp->val != 0x01)
1079 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1080 MGMT_STATUS_INVALID_PARAMS);
1081
1082 hci_dev_lock(hdev);
1083
1084 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1085 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1086 MGMT_STATUS_BUSY);
1087 goto failed;
1088 }
1089
1090 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1091 cancel_delayed_work(&hdev->power_off);
1092
1093 if (cp->val) {
1094 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1095 data, len);
1096 err = mgmt_powered(hdev, 1);
1097 goto failed;
1098 }
1099 }
1100
1101 if (!!cp->val == hdev_is_powered(hdev)) {
1102 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1103 goto failed;
1104 }
1105
1106 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1107 if (!cmd) {
1108 err = -ENOMEM;
1109 goto failed;
1110 }
1111
1112 if (cp->val) {
1113 queue_work(hdev->req_workqueue, &hdev->power_on);
1114 err = 0;
1115 } else {
1116 /* Disconnect connections, stop scans, etc */
1117 err = clean_up_hci_state(hdev);
1118
1119 /* ENODATA means there were no HCI commands queued */
1120 if (err == -ENODATA) {
1121 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1122 err = 0;
1123 }
1124 }
1125
1126 failed:
1127 hci_dev_unlock(hdev);
1128 return err;
1129 }
1130
/* Broadcast a mgmt event to every mgmt socket except @skip_sk.
 * @hdev may be NULL, in which case MGMT_INDEX_NONE is used as the
 * controller index.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1160
1161 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1162 {
1163 __le32 ev;
1164
1165 ev = cpu_to_le32(get_current_settings(hdev));
1166
1167 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1168 }
1169
/* Context passed to mgmt_pending_foreach() callbacks that answer and
 * collect pending settings commands.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen (ref held, see settings_rsp) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1175
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings and release the entry. The first socket
 * encountered is stashed, with a reference, in the cmd_lookup so the
 * caller can use it afterwards.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1191
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by @data and discard the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1199
1200 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1201 {
1202 if (!lmp_bredr_capable(hdev))
1203 return MGMT_STATUS_NOT_SUPPORTED;
1204 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1205 return MGMT_STATUS_REJECTED;
1206 else
1207 return MGMT_STATUS_SUCCESS;
1208 }
1209
1210 static u8 mgmt_le_support(struct hci_dev *hdev)
1211 {
1212 if (!lmp_le_capable(hdev))
1213 return MGMT_STATUS_NOT_SUPPORTED;
1214 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1215 return MGMT_STATUS_REJECTED;
1216 else
1217 return MGMT_STATUS_SUCCESS;
1218 }
1219
/* hci_request completion callback for set_discoverable(): commit the
 * flag changes, arm the discoverable timeout and answer the pending
 * mgmt command. Runs with the status of the HCI transaction.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited bit that set_discoverable() set
		 * optimistically before running the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout value stored by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1276
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the auto-disable period in seconds.
 * When an HCI transaction is required the command finishes
 * asynchronously in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* The timeout worker only runs while powered */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one scan-mode changing command may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode: advertise both the
			 * limited (LIAC, 0x9e8b00) and general (GIAC,
			 * 0x9e8b33) inquiry access codes if the
			 * controller supports two IACs.
			 */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00; /* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33; /* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33; /* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1441
1442 static void write_fast_connectable(struct hci_request *req, bool enable)
1443 {
1444 struct hci_dev *hdev = req->hdev;
1445 struct hci_cp_write_page_scan_activity acp;
1446 u8 type;
1447
1448 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1449 return;
1450
1451 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1452 return;
1453
1454 if (enable) {
1455 type = PAGE_SCAN_TYPE_INTERLACED;
1456
1457 /* 160 msec page scan interval */
1458 acp.interval = __constant_cpu_to_le16(0x0100);
1459 } else {
1460 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1461
1462 /* default 1.28 sec page scan */
1463 acp.interval = __constant_cpu_to_le16(0x0800);
1464 }
1465
1466 acp.window = __constant_cpu_to_le16(0x0012);
1467
1468 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1469 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1470 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1471 sizeof(acp), &acp);
1472
1473 if (hdev->page_scan_type != type)
1474 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1475 }
1476
/* hci_request completion callback for set_connectable(): commit the
 * HCI_CONNECTABLE flag change and answer the pending mgmt command.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	/* Only now that the HCI transaction succeeded is the flag
	 * updated; 'changed' decides whether New Settings is emitted.
	 */
	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1514
1515 static int set_connectable_update_settings(struct hci_dev *hdev,
1516 struct sock *sk, u8 val)
1517 {
1518 bool changed = false;
1519 int err;
1520
1521 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1522 changed = true;
1523
1524 if (val) {
1525 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1526 } else {
1527 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1528 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1529 }
1530
1531 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1532 if (err < 0)
1533 return err;
1534
1535 if (changed)
1536 return new_settings(hdev, sk);
1537
1538 return 0;
1539 }
1540
/* MGMT_OP_SET_CONNECTABLE handler: enable/disable page scanning (and,
 * for LE, refresh advertising). Finishes asynchronously in
 * set_connectable_complete() when HCI commands are queued.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one transport must be enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored settings need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one scan-mode changing command may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable also cancels a pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it reflects the new connectable state,
	 * but only while no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA means no HCI command was queued; fall back to
		 * a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1635
1636 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1637 u16 len)
1638 {
1639 struct mgmt_mode *cp = data;
1640 bool changed;
1641 int err;
1642
1643 BT_DBG("request for %s", hdev->name);
1644
1645 if (cp->val != 0x00 && cp->val != 0x01)
1646 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1647 MGMT_STATUS_INVALID_PARAMS);
1648
1649 hci_dev_lock(hdev);
1650
1651 if (cp->val)
1652 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1653 else
1654 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1655
1656 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1657 if (err < 0)
1658 goto unlock;
1659
1660 if (changed)
1661 err = new_settings(hdev, sk);
1662
1663 unlock:
1664 hci_dev_unlock(hdev);
1665 return err;
1666 }
1667
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication via
 * HCI Write Authentication Enable. Completes asynchronously through
 * the HCI command-complete path when the controller is powered.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip the stored setting */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: just acknowledge */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1737
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing via HCI Write
 * Simple Pairing Mode. Disabling SSP also disables High Speed, since
 * HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flip the stored flags only (no HCI traffic) */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP drags HS down with it; 'changed'
			 * must be true if either flag was set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just acknowledge */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1815
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) setting. This is
 * a host-only flag (no HCI commands), but it requires SSP to be
 * enabled, and disabling it while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Turning HS off is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1866
/* hci_request completion callback for set_le(): answer all pending
 * SET_LE commands, broadcast New Settings, and refresh the
 * advertising/scan-response data if LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* settings_rsp() records the first responder in match.sk (with a
	 * reference) so new_settings() can skip that socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1904
/* MGMT_OP_SET_LE handler: toggle LE host support via HCI Write LE Host
 * Supported. Completes asynchronously in le_enable_complete() when an
 * HCI transaction is needed.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If powered off, or the controller's host LE support already
	 * matches the request, only the stored flags change.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also takes down advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE support off */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1993
1994 /* This is a helper function to test for pending mgmt commands that can
1995 * cause CoD or EIR HCI commands. We can only allow one such pending
1996 * mgmt command at a time since otherwise we cannot easily track what
1997 * the current values are, will be, and based on that calculate if a new
1998 * HCI command needs to be sent and if yes with what value.
1999 */
2000 static bool pending_eir_or_class(struct hci_dev *hdev)
2001 {
2002 struct pending_cmd *cmd;
2003
2004 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2005 switch (cmd->opcode) {
2006 case MGMT_OP_ADD_UUID:
2007 case MGMT_OP_REMOVE_UUID:
2008 case MGMT_OP_SET_DEV_CLASS:
2009 case MGMT_OP_SET_POWERED:
2010 return true;
2011 }
2012 }
2013
2014 return false;
2015 }
2016
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; shortened 16/32-bit UUIDs share the last
 * 12 bytes (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2021
2022 static u8 get_uuid_size(const u8 *uuid)
2023 {
2024 u32 val;
2025
2026 if (memcmp(uuid, bluetooth_base_uuid, 12))
2027 return 128;
2028
2029 val = get_unaligned_le32(&uuid[12]);
2030 if (val > 0xffff)
2031 return 32;
2032
2033 return 16;
2034 }
2035
/* Complete a pending class/EIR-affecting command (@mgmt_op) by
 * replying with the (possibly updated) 3-byte class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2054
/* hci_request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2061
/* MGMT_OP_ADD_UUID handler: record a service UUID and refresh the EIR
 * data and class of device. Completes asynchronously through
 * add_uuid_complete() unless no HCI update is needed (-ENODATA).
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* ENODATA means no HCI update was necessary (e.g. while
		 * powered off), so reply right away with the current
		 * class of device.
		 */
		if (err != -ENODATA)
			goto failed;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2119
2120 static bool enable_service_cache(struct hci_dev *hdev)
2121 {
2122 if (!hdev_is_powered(hdev))
2123 return false;
2124
2125 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2126 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2127 CACHE_TIMEOUT);
2128 return true;
2129 }
2130
2131 return false;
2132 }
2133
/* hci_request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2140
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of
 * them when the all-zero UUID is given) and refresh EIR/class.
 * Completes asynchronously through remove_uuid_complete() unless no
 * HCI update is needed (-ENODATA).
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID is a wildcard: clear the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, defer the HCI update
		 * to its timer and reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* ENODATA means no HCI update was necessary; reply right
		 * away with the current class of device.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2218
/* hci_request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2225
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor class of
 * device. The low two bits of minor and high three bits of major are
 * reserved by the spec and must be zero. Completes asynchronously via
 * set_class_complete() when an HCI update is needed.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values will be written on power on */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache synchronously so the EIR update below
	 * sees the real UUID list; the lock must be dropped because the
	 * cache worker takes it itself.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* ENODATA means no HCI update was necessary; reply right
		 * away with the current class of device.
		 */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2296
2297 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2298 u16 len)
2299 {
2300 struct mgmt_cp_load_link_keys *cp = data;
2301 u16 key_count, expected_len;
2302 bool changed;
2303 int i;
2304
2305 BT_DBG("request for %s", hdev->name);
2306
2307 if (!lmp_bredr_capable(hdev))
2308 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2309 MGMT_STATUS_NOT_SUPPORTED);
2310
2311 key_count = __le16_to_cpu(cp->key_count);
2312
2313 expected_len = sizeof(*cp) + key_count *
2314 sizeof(struct mgmt_link_key_info);
2315 if (expected_len != len) {
2316 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2317 len, expected_len);
2318 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2319 MGMT_STATUS_INVALID_PARAMS);
2320 }
2321
2322 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2323 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2324 MGMT_STATUS_INVALID_PARAMS);
2325
2326 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2327 key_count);
2328
2329 for (i = 0; i < key_count; i++) {
2330 struct mgmt_link_key_info *key = &cp->keys[i];
2331
2332 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2333 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2334 MGMT_STATUS_INVALID_PARAMS);
2335 }
2336
2337 hci_dev_lock(hdev);
2338
2339 hci_link_keys_clear(hdev);
2340
2341 if (cp->debug_keys)
2342 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2343 else
2344 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2345
2346 if (changed)
2347 new_settings(hdev, NULL);
2348
2349 for (i = 0; i < key_count; i++) {
2350 struct mgmt_link_key_info *key = &cp->keys[i];
2351
2352 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2353 key->type, key->pin_len);
2354 }
2355
2356 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2357
2358 hci_dev_unlock(hdev);
2359
2360 return 0;
2361 }
2362
2363 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2364 u8 addr_type, struct sock *skip_sk)
2365 {
2366 struct mgmt_ev_device_unpaired ev;
2367
2368 bacpy(&ev.addr.bdaddr, bdaddr);
2369 ev.addr.type = addr_type;
2370
2371 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2372 skip_sk);
2373 }
2374
/* MGMT_OP_UNPAIR_DEVICE handler: delete the keys (link key for BR/EDR;
 * IRK and LTK for LE) for a remote device and optionally disconnect
 * it. When a disconnect is issued, the reply is deferred until the
 * disconnection completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the address being unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key found means the device was never paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No active connection (or no disconnect requested): reply and
	 * announce the unpairing right away.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2463
/* Handle the mgmt Disconnect command: tear down an existing BR/EDR or
 * LE connection.  The actual result is delivered later through the
 * pending command once the HCI disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Echo the target address back in every response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be in flight per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no usable handle yet */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2528
2529 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2530 {
2531 switch (link_type) {
2532 case LE_LINK:
2533 switch (addr_type) {
2534 case ADDR_LE_DEV_PUBLIC:
2535 return BDADDR_LE_PUBLIC;
2536
2537 default:
2538 /* Fallback to LE Random address type */
2539 return BDADDR_LE_RANDOM;
2540 }
2541
2542 default:
2543 /* Fallback to BR/EDR type */
2544 return BDADDR_BREDR;
2545 }
2546 }
2547
2548 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2549 u16 data_len)
2550 {
2551 struct mgmt_rp_get_connections *rp;
2552 struct hci_conn *c;
2553 size_t rp_len;
2554 int err;
2555 u16 i;
2556
2557 BT_DBG("");
2558
2559 hci_dev_lock(hdev);
2560
2561 if (!hdev_is_powered(hdev)) {
2562 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2563 MGMT_STATUS_NOT_POWERED);
2564 goto unlock;
2565 }
2566
2567 i = 0;
2568 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2569 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2570 i++;
2571 }
2572
2573 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2574 rp = kmalloc(rp_len, GFP_KERNEL);
2575 if (!rp) {
2576 err = -ENOMEM;
2577 goto unlock;
2578 }
2579
2580 i = 0;
2581 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2582 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2583 continue;
2584 bacpy(&rp->addr[i].bdaddr, &c->dst);
2585 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2586 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2587 continue;
2588 i++;
2589 }
2590
2591 rp->conn_count = cpu_to_le16(i);
2592
2593 /* Recalculate length in case of filtered SCO connections, etc */
2594 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2595
2596 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2597 rp_len);
2598
2599 kfree(rp);
2600
2601 unlock:
2602 hci_dev_unlock(hdev);
2603 return err;
2604 }
2605
/* Queue a PIN Code Negative Reply as a pending mgmt command and send
 * the corresponding HCI command.  The pending entry is removed again
 * if the HCI send fails.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command payload is just the remote bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2624
/* Handle the mgmt PIN Code Reply command.  If the connection requires
 * high security, only a full 16-byte PIN is acceptable; shorter PINs
 * are converted into a negative reply toward the controller while the
 * caller gets an Invalid Params status.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only applies to BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject toward the controller, then report the bad
		 * parameter back to the mgmt caller.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2684
2685 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2686 u16 len)
2687 {
2688 struct mgmt_cp_set_io_capability *cp = data;
2689
2690 BT_DBG("");
2691
2692 hci_dev_lock(hdev);
2693
2694 hdev->io_capability = cp->io_capability;
2695
2696 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2697 hdev->io_capability);
2698
2699 hci_dev_unlock(hdev);
2700
2701 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2702 0);
2703 }
2704
2705 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2706 {
2707 struct hci_dev *hdev = conn->hdev;
2708 struct pending_cmd *cmd;
2709
2710 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2711 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2712 continue;
2713
2714 if (cmd->user_data != conn)
2715 continue;
2716
2717 return cmd;
2718 }
2719
2720 return NULL;
2721 }
2722
/* Finish a pending Pair Device command: send the result to the mgmt
 * caller, detach the pairing callbacks, drop the connection reference
 * taken when pairing started, and remove the pending entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* Must happen last: cmd (and cmd->user_data) is freed here */
	mgmt_pending_remove(cmd);
}
2743
2744 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2745 {
2746 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2747 struct pending_cmd *cmd;
2748
2749 cmd = find_pairing(conn);
2750 if (cmd)
2751 pairing_complete(cmd, status);
2752 }
2753
2754 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2755 {
2756 struct pending_cmd *cmd;
2757
2758 BT_DBG("status %u", status);
2759
2760 cmd = find_pairing(conn);
2761 if (!cmd)
2762 BT_DBG("Unable to find a pending command");
2763 else
2764 pairing_complete(cmd, mgmt_status(status));
2765 }
2766
2767 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2768 {
2769 struct pending_cmd *cmd;
2770
2771 BT_DBG("status %u", status);
2772
2773 if (!status)
2774 return;
2775
2776 cmd = find_pairing(conn);
2777 if (!cmd)
2778 BT_DBG("Unable to find a pending command");
2779 else
2780 pairing_complete(cmd, mgmt_status(status));
2781 }
2782
/* Handle the mgmt Pair Device command: establish an ACL or LE
 * connection with dedicated bonding and track the attempt as a
 * pending command completed via the pairing callbacks above.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Echo the target address back in every response */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	else
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, cp->addr.type,
				      sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect callback already set means another pairing attempt
	 * owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete right away */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2877
/* Handle the mgmt Cancel Pair Device command: abort the pending Pair
 * Device command for the given address with a Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2919
/* Common handler for user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives).  LE addresses are
 * routed to SMP and answered synchronously; BR/EDR responses are sent
 * as HCI commands tracked via a pending mgmt command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the bdaddr as payload */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2989
/* mgmt PIN Code Negative Reply: thin wrapper around the common user
 * pairing response handler (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3001
/* mgmt User Confirm Reply: validates the exact parameter length, then
 * delegates to the common user pairing response handler.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3017
/* mgmt User Confirm Negative Reply: thin wrapper around the common
 * user pairing response handler.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3029
/* mgmt User Passkey Reply: forwards the caller-supplied passkey to
 * the common user pairing response handler.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3041
/* mgmt User Passkey Negative Reply: thin wrapper around the common
 * user pairing response handler.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3053
/* Queue an HCI Write Local Name command carrying hdev->dev_name.
 * NOTE(review): copies sizeof(cp.name) bytes from hdev->dev_name —
 * assumes dev_name is at least that large; appears to hold for the
 * hci_dev definition but confirm against hci_core.h.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3063
/* HCI request completion handler for Set Local Name: report the
 * outcome to the mgmt caller and drop the pending command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3091
/* Handle the mgmt Set Local Name command.  When the controller is
 * powered, the name change is pushed to it via an HCI request (name,
 * EIR and scan response data); otherwise it is only stored and a
 * Local Name Changed event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name never needs an HCI command; store it now */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3160
/* Handle the mgmt Read Local OOB Data command: request the OOB data
 * (extended variant when Secure Connections is enabled) from the
 * controller; the result arrives via the pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one request may be outstanding at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3208
/* Handle the mgmt Add Remote OOB Data command.  Two payload sizes are
 * accepted: the legacy P-192-only form and the extended form that
 * additionally carries P-256 hash/randomizer values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3256
3257 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3258 void *data, u16 len)
3259 {
3260 struct mgmt_cp_remove_remote_oob_data *cp = data;
3261 u8 status;
3262 int err;
3263
3264 BT_DBG("%s", hdev->name);
3265
3266 hci_dev_lock(hdev);
3267
3268 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3269 if (err < 0)
3270 status = MGMT_STATUS_INVALID_PARAMS;
3271 else
3272 status = MGMT_STATUS_SUCCESS;
3273
3274 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3275 status, &cp->addr, sizeof(cp->addr));
3276
3277 hci_dev_unlock(hdev);
3278 return err;
3279 }
3280
3281 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3282 {
3283 struct pending_cmd *cmd;
3284 u8 type;
3285 int err;
3286
3287 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3288
3289 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3290 if (!cmd)
3291 return -ENOENT;
3292
3293 type = hdev->discovery.type;
3294
3295 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3296 &type, sizeof(type));
3297 mgmt_pending_remove(cmd);
3298
3299 return err;
3300 }
3301
3302 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3303 {
3304 BT_DBG("status %d", status);
3305
3306 if (status) {
3307 hci_dev_lock(hdev);
3308 mgmt_start_discovery_failed(hdev, status);
3309 hci_dev_unlock(hdev);
3310 return;
3311 }
3312
3313 hci_dev_lock(hdev);
3314 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3315 hci_dev_unlock(hdev);
3316
3317 switch (hdev->discovery.type) {
3318 case DISCOV_TYPE_LE:
3319 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3320 DISCOV_LE_TIMEOUT);
3321 break;
3322
3323 case DISCOV_TYPE_INTERLEAVED:
3324 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3325 DISCOV_INTERLEAVED_TIMEOUT);
3326 break;
3327
3328 case DISCOV_TYPE_BREDR:
3329 break;
3330
3331 default:
3332 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3333 }
3334 }
3335
/* Handle the mgmt Start Discovery command: build and run an HCI
 * request performing BR/EDR inquiry, LE scanning, or both
 * (interleaved), depending on the requested discovery type.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and an active discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Advertising and scanning cannot run concurrently */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3484
3485 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3486 {
3487 struct pending_cmd *cmd;
3488 int err;
3489
3490 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3491 if (!cmd)
3492 return -ENOENT;
3493
3494 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3495 &hdev->discovery.type, sizeof(hdev->discovery.type));
3496 mgmt_pending_remove(cmd);
3497
3498 return err;
3499 }
3500
3501 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3502 {
3503 BT_DBG("status %d", status);
3504
3505 hci_dev_lock(hdev);
3506
3507 if (status) {
3508 mgmt_stop_discovery_failed(hdev, status);
3509 goto unlock;
3510 }
3511
3512 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3513
3514 unlock:
3515 hci_dev_unlock(hdev);
3516 }
3517
/* Handle the mgmt Stop Discovery command: cancel whatever discovery
 * phase is active (inquiry, LE scan, or a pending remote name
 * request) via an HCI request.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery in progress */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Cancel whichever scan type is currently running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request outstanding: nothing to cancel */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3605
/* Handle the mgmt Confirm Name command: tell the discovery machinery
 * whether the name of a discovered device is already known (drop it
 * from the resolve list) or still needs to be resolved.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3645
3646 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3647 u16 len)
3648 {
3649 struct mgmt_cp_block_device *cp = data;
3650 u8 status;
3651 int err;
3652
3653 BT_DBG("%s", hdev->name);
3654
3655 if (!bdaddr_type_is_valid(cp->addr.type))
3656 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3657 MGMT_STATUS_INVALID_PARAMS,
3658 &cp->addr, sizeof(cp->addr));
3659
3660 hci_dev_lock(hdev);
3661
3662 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3663 if (err < 0)
3664 status = MGMT_STATUS_FAILED;
3665 else
3666 status = MGMT_STATUS_SUCCESS;
3667
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3669 &cp->addr, sizeof(cp->addr));
3670
3671 hci_dev_unlock(hdev);
3672
3673 return err;
3674 }
3675
3676 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3677 u16 len)
3678 {
3679 struct mgmt_cp_unblock_device *cp = data;
3680 u8 status;
3681 int err;
3682
3683 BT_DBG("%s", hdev->name);
3684
3685 if (!bdaddr_type_is_valid(cp->addr.type))
3686 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3687 MGMT_STATUS_INVALID_PARAMS,
3688 &cp->addr, sizeof(cp->addr));
3689
3690 hci_dev_lock(hdev);
3691
3692 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3693 if (err < 0)
3694 status = MGMT_STATUS_INVALID_PARAMS;
3695 else
3696 status = MGMT_STATUS_SUCCESS;
3697
3698 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3699 &cp->addr, sizeof(cp->addr));
3700
3701 hci_dev_unlock(hdev);
3702
3703 return err;
3704 }
3705
3706 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3707 u16 len)
3708 {
3709 struct mgmt_cp_set_device_id *cp = data;
3710 struct hci_request req;
3711 int err;
3712 __u16 source;
3713
3714 BT_DBG("%s", hdev->name);
3715
3716 source = __le16_to_cpu(cp->source);
3717
3718 if (source > 0x0002)
3719 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3720 MGMT_STATUS_INVALID_PARAMS);
3721
3722 hci_dev_lock(hdev);
3723
3724 hdev->devid_source = source;
3725 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3726 hdev->devid_product = __le16_to_cpu(cp->product);
3727 hdev->devid_version = __le16_to_cpu(cp->version);
3728
3729 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3730
3731 hci_req_init(&req, hdev);
3732 update_eir(&req);
3733 hci_req_run(&req, NULL);
3734
3735 hci_dev_unlock(hdev);
3736
3737 return err;
3738 }
3739
/* Request-completion callback for Set Advertising: respond to all
 * pending Set Advertising commands with either the mapped error status
 * or the new settings, and emit a New Settings event on success.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference stored in match by the settings_rsp
	 * iteration, if any.
	 */
	if (match.sk)
		sock_put(match.sk);
}
3760
/* Handle the Set Advertising mgmt command: enable or disable LE
 * advertising. When no HCI traffic is needed (powered off, no actual
 * change, or an LE connection exists) only the flag is toggled and a
 * response is sent directly; otherwise an HCI request is issued and
 * completion is handled in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flag actually
		 * flipped.
		 */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A competing Set Advertising or Set LE request is in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3838
/* Handle the Set Static Address mgmt command: store the LE static
 * address for the controller. Only permitted while powered off.
 * BDADDR_ANY clears the address; any other value must be a valid
 * static random address (two most significant bits set, and not
 * BDADDR_NONE).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
3878
3879 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3880 void *data, u16 len)
3881 {
3882 struct mgmt_cp_set_scan_params *cp = data;
3883 __u16 interval, window;
3884 int err;
3885
3886 BT_DBG("%s", hdev->name);
3887
3888 if (!lmp_le_capable(hdev))
3889 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3890 MGMT_STATUS_NOT_SUPPORTED);
3891
3892 interval = __le16_to_cpu(cp->interval);
3893
3894 if (interval < 0x0004 || interval > 0x4000)
3895 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3896 MGMT_STATUS_INVALID_PARAMS);
3897
3898 window = __le16_to_cpu(cp->window);
3899
3900 if (window < 0x0004 || window > 0x4000)
3901 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3902 MGMT_STATUS_INVALID_PARAMS);
3903
3904 if (window > interval)
3905 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3906 MGMT_STATUS_INVALID_PARAMS);
3907
3908 hci_dev_lock(hdev);
3909
3910 hdev->le_scan_interval = interval;
3911 hdev->le_scan_window = window;
3912
3913 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3914
3915 hci_dev_unlock(hdev);
3916
3917 return err;
3918 }
3919
/* Request-completion callback for Set Fast Connectable: on success
 * update the HCI_FAST_CONNECTABLE flag according to the pending
 * command's parameter and notify user space; on failure report the
 * mapped error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3952
/* Handle the Set Fast Connectable mgmt command: adjust the page scan
 * parameters for faster incoming connections. Requires BR/EDR to be
 * enabled, a controller of at least Bluetooth 1.2, the device to be
 * powered, and the connectable setting to be on. The actual flag
 * update happens in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just confirm the settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4017
4018 static void set_bredr_scan(struct hci_request *req)
4019 {
4020 struct hci_dev *hdev = req->hdev;
4021 u8 scan = 0;
4022
4023 /* Ensure that fast connectable is disabled. This function will
4024 * not do anything if the page scan parameters are already what
4025 * they should be.
4026 */
4027 write_fast_connectable(req, false);
4028
4029 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4030 scan |= SCAN_PAGE;
4031 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4032 scan |= SCAN_INQUIRY;
4033
4034 if (scan)
4035 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4036 }
4037
/* Request-completion callback for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag that was optimistically set before the HCI
 * request was issued, otherwise confirm the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4069
/* Handle the Set BR/EDR mgmt command: enable or disable BR/EDR support
 * on a dual-mode (BR/EDR + LE) controller. Disabling while powered on
 * is rejected; while powered off the flag is simply toggled (clearing
 * the BR/EDR-only settings when turning it off). Enabling while
 * powered triggers an HCI request completed by set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* When turning BR/EDR off, clear all settings that only
		 * make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4159
/* Handle the Set Secure Connections mgmt command. Accepted values are
 * 0x00 (off), 0x01 (on) and 0x02 (secure connections only mode).
 * While powered off only the flags are updated; while powered on the
 * Write Secure Connections Host Support HCI command is sent and the
 * HCI_SC_ONLY flag is adjusted to match the requested mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Allow the command on non-SC controllers only when debugfs
	 * forced SC support.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled state and the SC-only mode already match the
	 * request: nothing to send to the controller.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4247
4248 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4249 void *data, u16 len)
4250 {
4251 struct mgmt_mode *cp = data;
4252 bool changed;
4253 int err;
4254
4255 BT_DBG("request for %s", hdev->name);
4256
4257 if (cp->val != 0x00 && cp->val != 0x01)
4258 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4259 MGMT_STATUS_INVALID_PARAMS);
4260
4261 hci_dev_lock(hdev);
4262
4263 if (cp->val)
4264 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4265 else
4266 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4267
4268 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4269 if (err < 0)
4270 goto unlock;
4271
4272 if (changed)
4273 err = new_settings(hdev, sk);
4274
4275 unlock:
4276 hci_dev_unlock(hdev);
4277 return err;
4278 }
4279
/* Handle the Set Privacy mgmt command: enable or disable LE privacy
 * and store the supplied Identity Resolving Key. Only permitted while
 * the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4329
4330 static bool irk_is_valid(struct mgmt_irk_info *irk)
4331 {
4332 switch (irk->addr.type) {
4333 case BDADDR_LE_PUBLIC:
4334 return true;
4335
4336 case BDADDR_LE_RANDOM:
4337 /* Two most significant bits shall be set */
4338 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4339 return false;
4340 return true;
4341 }
4342
4343 return false;
4344 }
4345
4346 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4347 u16 len)
4348 {
4349 struct mgmt_cp_load_irks *cp = cp_data;
4350 u16 irk_count, expected_len;
4351 int i, err;
4352
4353 BT_DBG("request for %s", hdev->name);
4354
4355 if (!lmp_le_capable(hdev))
4356 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4357 MGMT_STATUS_NOT_SUPPORTED);
4358
4359 irk_count = __le16_to_cpu(cp->irk_count);
4360
4361 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4362 if (expected_len != len) {
4363 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4364 len, expected_len);
4365 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4366 MGMT_STATUS_INVALID_PARAMS);
4367 }
4368
4369 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4370
4371 for (i = 0; i < irk_count; i++) {
4372 struct mgmt_irk_info *key = &cp->irks[i];
4373
4374 if (!irk_is_valid(key))
4375 return cmd_status(sk, hdev->id,
4376 MGMT_OP_LOAD_IRKS,
4377 MGMT_STATUS_INVALID_PARAMS);
4378 }
4379
4380 hci_dev_lock(hdev);
4381
4382 hci_smp_irks_clear(hdev);
4383
4384 for (i = 0; i < irk_count; i++) {
4385 struct mgmt_irk_info *irk = &cp->irks[i];
4386 u8 addr_type;
4387
4388 if (irk->addr.type == BDADDR_LE_PUBLIC)
4389 addr_type = ADDR_LE_DEV_PUBLIC;
4390 else
4391 addr_type = ADDR_LE_DEV_RANDOM;
4392
4393 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4394 BDADDR_ANY);
4395 }
4396
4397 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4398
4399 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4400
4401 hci_dev_unlock(hdev);
4402
4403 return err;
4404 }
4405
4406 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4407 {
4408 if (key->master != 0x00 && key->master != 0x01)
4409 return false;
4410
4411 switch (key->addr.type) {
4412 case BDADDR_LE_PUBLIC:
4413 return true;
4414
4415 case BDADDR_LE_RANDOM:
4416 /* Two most significant bits shall be set */
4417 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4418 return false;
4419 return true;
4420 }
4421
4422 return false;
4423 }
4424
4425 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4426 void *cp_data, u16 len)
4427 {
4428 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4429 u16 key_count, expected_len;
4430 int i, err;
4431
4432 BT_DBG("request for %s", hdev->name);
4433
4434 if (!lmp_le_capable(hdev))
4435 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4436 MGMT_STATUS_NOT_SUPPORTED);
4437
4438 key_count = __le16_to_cpu(cp->key_count);
4439
4440 expected_len = sizeof(*cp) + key_count *
4441 sizeof(struct mgmt_ltk_info);
4442 if (expected_len != len) {
4443 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4444 len, expected_len);
4445 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4446 MGMT_STATUS_INVALID_PARAMS);
4447 }
4448
4449 BT_DBG("%s key_count %u", hdev->name, key_count);
4450
4451 for (i = 0; i < key_count; i++) {
4452 struct mgmt_ltk_info *key = &cp->keys[i];
4453
4454 if (!ltk_is_valid(key))
4455 return cmd_status(sk, hdev->id,
4456 MGMT_OP_LOAD_LONG_TERM_KEYS,
4457 MGMT_STATUS_INVALID_PARAMS);
4458 }
4459
4460 hci_dev_lock(hdev);
4461
4462 hci_smp_ltks_clear(hdev);
4463
4464 for (i = 0; i < key_count; i++) {
4465 struct mgmt_ltk_info *key = &cp->keys[i];
4466 u8 type, addr_type;
4467
4468 if (key->addr.type == BDADDR_LE_PUBLIC)
4469 addr_type = ADDR_LE_DEV_PUBLIC;
4470 else
4471 addr_type = ADDR_LE_DEV_RANDOM;
4472
4473 if (key->master)
4474 type = HCI_SMP_LTK;
4475 else
4476 type = HCI_SMP_LTK_SLAVE;
4477
4478 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4479 key->type, key->val, key->enc_size, key->ediv,
4480 key->rand);
4481 }
4482
4483 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4484 NULL, 0);
4485
4486 hci_dev_unlock(hdev);
4487
4488 return err;
4489 }
4490
/* Dispatch table for incoming mgmt commands, indexed directly by
 * opcode in mgmt_control(). Each entry records the handler, whether
 * the command carries a variable-length payload, and the expected (or,
 * for variable-length commands, minimum) parameter length. Entries
 * must stay in opcode order.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4547
4548
/* Entry point for messages written to a mgmt control socket. Copies
 * the message into a kernel buffer, validates the header (opcode,
 * controller index, payload length), resolves the target hci_dev when
 * the command is index-specific, and dispatches to the matching entry
 * in mgmt_handlers[]. Returns the number of consumed bytes on success
 * or a negative errno / sends an error status to the socket on
 * failure.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user
		 * channel are not visible over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below MGMT_OP_READ_INFO are global and must not carry
	 * an index; everything else requires one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands declare a minimum size, fixed-length
	 * commands an exact one.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4641
4642 void mgmt_index_added(struct hci_dev *hdev)
4643 {
4644 if (hdev->dev_type != HCI_BREDR)
4645 return;
4646
4647 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4648 }
4649
4650 void mgmt_index_removed(struct hci_dev *hdev)
4651 {
4652 u8 status = MGMT_STATUS_INVALID_INDEX;
4653
4654 if (hdev->dev_type != HCI_BREDR)
4655 return;
4656
4657 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4658
4659 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4660 }
4661
/* Request-completion callback for the power-on HCI initialization
 * issued by powered_update_hci(): respond to all pending Set Powered
 * commands and broadcast the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference stored in match by the settings_rsp
	 * iteration, if any.
	 */
	if (match.sk)
		sock_put(match.sk);
}
4679
/* Bring the controller's HCI state in line with the current mgmt
 * settings after it has been powered on: SSP mode, LE host support,
 * advertising and scan response data, authentication, scan enable,
 * class of device, local name and EIR. Returns the result of running
 * the accumulated HCI request (completed by powered_complete()).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if the setting is on but the
	 * host support bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the controller. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4739
/* Notify mgmt of a controller power state change. On power-on the HCI
 * state is synchronized via powered_update_hci() (whose completion
 * callback answers the pending commands); on power-off all pending
 * commands are failed with Not Powered and a zero class of device is
 * broadcast if needed. Either way, New Settings is emitted.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If the HCI request was queued, powered_complete()
		 * takes over; nothing more to do here.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4774
4775 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4776 {
4777 struct pending_cmd *cmd;
4778 u8 status;
4779
4780 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4781 if (!cmd)
4782 return;
4783
4784 if (err == -ERFKILL)
4785 status = MGMT_STATUS_RFKILLED;
4786 else
4787 status = MGMT_STATUS_FAILED;
4788
4789 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4790
4791 mgmt_pending_remove(cmd);
4792 }
4793
/* Timer callback fired when the discoverable timeout expires: clear the
 * discoverable flags, restore plain page scan (when BR/EDR is enabled)
 * and notify user space of the changed settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;	/* page scan only, no inquiry scan */
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Refresh class of device and advertising data now that the
	 * discoverable flags changed.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4824
4825 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4826 {
4827 bool changed;
4828
4829 /* Nothing needed here if there's a pending command since that
4830 * commands request completion callback takes care of everything
4831 * necessary.
4832 */
4833 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4834 return;
4835
4836 /* Powering off may clear the scan mode - don't let that interfere */
4837 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4838 return;
4839
4840 if (discoverable) {
4841 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4842 } else {
4843 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4844 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4845 }
4846
4847 if (changed) {
4848 struct hci_request req;
4849
4850 /* In case this change in discoverable was triggered by
4851 * a disabling of connectable there could be a need to
4852 * update the advertising flags.
4853 */
4854 hci_req_init(&req, hdev);
4855 update_adv_data(&req);
4856 hci_req_run(&req, NULL);
4857
4858 new_settings(hdev, NULL);
4859 }
4860 }
4861
4862 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4863 {
4864 bool changed;
4865
4866 /* Nothing needed here if there's a pending command since that
4867 * commands request completion callback takes care of everything
4868 * necessary.
4869 */
4870 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4871 return;
4872
4873 /* Powering off may clear the scan mode - don't let that interfere */
4874 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4875 return;
4876
4877 if (connectable)
4878 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4879 else
4880 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4881
4882 if (changed)
4883 new_settings(hdev, NULL);
4884 }
4885
/* Sync the HCI_ADVERTISING mgmt flag with the advertising state
 * reported by the core.
 */
void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
{
	/* Powering off may stop advertising - don't let that interfere */
	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (advertising)
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
}
4897
/* A Write Scan Enable command failed: fail the pending Set
 * Connectable/Discoverable commands matching the scan bits that could
 * not be written.
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);	/* HCI -> mgmt status code */

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
4910
4911 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4912 bool persistent)
4913 {
4914 struct mgmt_ev_new_link_key ev;
4915
4916 memset(&ev, 0, sizeof(ev));
4917
4918 ev.store_hint = persistent;
4919 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4920 ev.key.addr.type = BDADDR_BREDR;
4921 ev.key.type = key->type;
4922 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4923 ev.key.pin_len = key->pin_len;
4924
4925 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4926 }
4927
/* Emit a New Long Term Key event so user space can decide whether to
 * persist the key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* ev.key.master stays 0 (from the memset) unless this is the
	 * HCI_SMP_LTK type key.
	 */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4965
4966 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
4967 {
4968 struct mgmt_ev_new_irk ev;
4969
4970 memset(&ev, 0, sizeof(ev));
4971
4972 /* For identity resolving keys from devices that are already
4973 * using a public address or static random address, do not
4974 * ask for storing this key. The identity resolving key really
4975 * is only mandatory for devices using resovlable random
4976 * addresses.
4977 *
4978 * Storing all identity resolving keys has the downside that
4979 * they will be also loaded on next boot of they system. More
4980 * identity resolving keys, means more time during scanning is
4981 * needed to actually resolve these addresses.
4982 */
4983 if (bacmp(&irk->rpa, BDADDR_ANY))
4984 ev.store_hint = 0x01;
4985 else
4986 ev.store_hint = 0x00;
4987
4988 bacpy(&ev.rpa, &irk->rpa);
4989 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
4990 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
4991 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
4992
4993 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
4994 }
4995
4996 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4997 u8 data_len)
4998 {
4999 eir[eir_len++] = sizeof(type) + data_len;
5000 eir[eir_len++] = type;
5001 memcpy(&eir[eir_len], data, data_len);
5002 eir_len += data_len;
5003
5004 return eir_len;
5005 }
5006
/* Emit a Device Connected event, packing the remote name and class of
 * device (when available) as EIR fields after the fixed event header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Room for the fixed header plus a complete-name field and a
	 * class-of-device field.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5033
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and pass a referenced copy of its socket back
 * to the caller through @data (a struct sock **).
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Take our own reference before the pending command is torn
	 * down below, so the socket stays valid for the caller.
	 */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5051
/* mgmt_pending_foreach() callback: signal the unpaired device and
 * complete the pending Unpair Device command with success.
 * @data is the hci_dev the command was issued against.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5068
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands waiting on this link.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	/* If a power-off is pending and this was the last connection,
	 * kick the deferred power-off work now.
	 */
	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1)
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only signal user space about connections it was told about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() completes the pending Disconnect command and
	 * hands back a referenced socket which is passed to mgmt_event().
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5108
5109 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5110 u8 link_type, u8 addr_type, u8 status)
5111 {
5112 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5113 struct mgmt_cp_disconnect *cp;
5114 struct mgmt_rp_disconnect rp;
5115 struct pending_cmd *cmd;
5116
5117 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5118 hdev);
5119
5120 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5121 if (!cmd)
5122 return;
5123
5124 cp = cmd->param;
5125
5126 if (bacmp(bdaddr, &cp->addr.bdaddr))
5127 return;
5128
5129 if (cp->addr.type != bdaddr_type)
5130 return;
5131
5132 bacpy(&rp.addr.bdaddr, bdaddr);
5133 rp.addr.type = bdaddr_type;
5134
5135 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5136 mgmt_status(status), &rp, sizeof(rp));
5137
5138 mgmt_pending_remove(cmd);
5139 }
5140
5141 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5142 u8 addr_type, u8 status)
5143 {
5144 struct mgmt_ev_connect_failed ev;
5145
5146 bacpy(&ev.addr.bdaddr, bdaddr);
5147 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5148 ev.status = mgmt_status(status);
5149
5150 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5151 }
5152
5153 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5154 {
5155 struct mgmt_ev_pin_code_request ev;
5156
5157 bacpy(&ev.addr.bdaddr, bdaddr);
5158 ev.addr.type = BDADDR_BREDR;
5159 ev.secure = secure;
5160
5161 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5162 }
5163
/* Handle completion of a PIN Code Reply: answer the pending mgmt
 * command with the translated HCI status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	/* PIN pairing is BR/EDR only. */
	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5182
/* Handle completion of a PIN Code Negative Reply: answer the pending
 * mgmt command with the translated HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	/* PIN pairing is BR/EDR only. */
	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5201
5202 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5203 u8 link_type, u8 addr_type, __le32 value,
5204 u8 confirm_hint)
5205 {
5206 struct mgmt_ev_user_confirm_request ev;
5207
5208 BT_DBG("%s", hdev->name);
5209
5210 bacpy(&ev.addr.bdaddr, bdaddr);
5211 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5212 ev.confirm_hint = confirm_hint;
5213 ev.value = value;
5214
5215 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5216 NULL);
5217 }
5218
5219 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5220 u8 link_type, u8 addr_type)
5221 {
5222 struct mgmt_ev_user_passkey_request ev;
5223
5224 BT_DBG("%s", hdev->name);
5225
5226 bacpy(&ev.addr.bdaddr, bdaddr);
5227 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5228
5229 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5230 NULL);
5231 }
5232
5233 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5234 u8 link_type, u8 addr_type, u8 status,
5235 u8 opcode)
5236 {
5237 struct pending_cmd *cmd;
5238 struct mgmt_rp_user_confirm_reply rp;
5239 int err;
5240
5241 cmd = mgmt_pending_find(opcode, hdev);
5242 if (!cmd)
5243 return -ENOENT;
5244
5245 bacpy(&rp.addr.bdaddr, bdaddr);
5246 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5247 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5248 &rp, sizeof(rp));
5249
5250 mgmt_pending_remove(cmd);
5251
5252 return err;
5253 }
5254
/* Completion handler for a User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5261
/* Completion handler for a User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5269
/* Completion handler for a User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5276
/* Completion handler for a User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5284
5285 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5286 u8 link_type, u8 addr_type, u32 passkey,
5287 u8 entered)
5288 {
5289 struct mgmt_ev_passkey_notify ev;
5290
5291 BT_DBG("%s", hdev->name);
5292
5293 bacpy(&ev.addr.bdaddr, bdaddr);
5294 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5295 ev.passkey = __cpu_to_le32(passkey);
5296 ev.entered = entered;
5297
5298 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5299 }
5300
5301 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5302 u8 addr_type, u8 status)
5303 {
5304 struct mgmt_ev_auth_failed ev;
5305
5306 bacpy(&ev.addr.bdaddr, bdaddr);
5307 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5308 ev.status = mgmt_status(status);
5309
5310 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5311 }
5312
/* Handle completion of a Write Authentication Enable command: sync the
 * link security mgmt flag with the controller state and answer pending
 * Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5341
5342 static void clear_eir(struct hci_request *req)
5343 {
5344 struct hci_dev *hdev = req->hdev;
5345 struct hci_cp_write_eir cp;
5346
5347 if (!lmp_ext_inq_capable(hdev))
5348 return;
5349
5350 memset(hdev->eir, 0, sizeof(hdev->eir));
5351
5352 memset(&cp, 0, sizeof(cp));
5353
5354 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5355 }
5356
/* Handle completion of a Write Simple Pairing Mode command: sync the
 * SSP and HS mgmt flags with the result, answer pending Set SSP
 * commands and refresh the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll the flags back and let user
		 * space know the settings are unchanged.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* The HS flag is always cleared together with SSP; either
		 * clearing may be the one that constitutes a change.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* The EIR contents differ depending on whether SSP is enabled. */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5405
/* Handle completion of a Write Secure Connections Support command:
 * sync the SC mgmt flags with the result and answer pending Set Secure
 * Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: undo the optimistic flag updates. */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot remain once SC itself is off. */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5442
5443 static void sk_lookup(struct pending_cmd *cmd, void *data)
5444 {
5445 struct cmd_lookup *match = data;
5446
5447 if (match->sk == NULL) {
5448 match->sk = cmd->sk;
5449 sock_hold(match->sk);
5450 }
5451 }
5452
/* Handle completion of a class-of-device update: pick a socket to
 * attribute the change to (from any pending Set Dev Class / UUID
 * command) and broadcast the new class on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5469
/* Handle completion of a local name update and emit a Local Name
 * Changed event, except during power-on where it would be noise.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change came from the
		 * HCI side: cache the new name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5496
/* Deliver the result of a Read Local OOB Data request back to the mgmt
 * socket that issued it. When secure connections are enabled and the
 * 256-bit values are available, the extended reply carrying both the
 * 192-bit and 256-bit hash/randomizer pairs is used; otherwise the
 * legacy 192-bit-only reply is sent.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5543
/* Emit a Device Found event for a remote device seen while discovery
 * is active. A resolvable private address is mapped back to the
 * identity address when a matching IRK is known.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Device Found events are only of interest during discovery. */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address instead of the RPA when the
	 * matching identity resolving key is available.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already has one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5589
5590 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5591 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5592 {
5593 struct mgmt_ev_device_found *ev;
5594 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5595 u16 eir_len;
5596
5597 ev = (struct mgmt_ev_device_found *) buf;
5598
5599 memset(buf, 0, sizeof(buf));
5600
5601 bacpy(&ev->addr.bdaddr, bdaddr);
5602 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5603 ev->rssi = rssi;
5604
5605 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5606 name_len);
5607
5608 ev->eir_len = cpu_to_le16(eir_len);
5609
5610 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5611 }
5612
5613 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5614 {
5615 struct mgmt_ev_discovering ev;
5616 struct pending_cmd *cmd;
5617
5618 BT_DBG("%s discovering %u", hdev->name, discovering);
5619
5620 if (discovering)
5621 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5622 else
5623 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5624
5625 if (cmd != NULL) {
5626 u8 type = hdev->discovery.type;
5627
5628 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5629 sizeof(type));
5630 mgmt_pending_remove(cmd);
5631 }
5632
5633 memset(&ev, 0, sizeof(ev));
5634 ev.type = hdev->discovery.type;
5635 ev.discovering = discovering;
5636
5637 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5638 }
5639
5640 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5641 {
5642 struct pending_cmd *cmd;
5643 struct mgmt_ev_device_blocked ev;
5644
5645 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5646
5647 bacpy(&ev.addr.bdaddr, bdaddr);
5648 ev.addr.type = type;
5649
5650 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5651 cmd ? cmd->sk : NULL);
5652 }
5653
5654 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5655 {
5656 struct pending_cmd *cmd;
5657 struct mgmt_ev_device_unblocked ev;
5658
5659 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5660
5661 bacpy(&ev.addr.bdaddr, bdaddr);
5662 ev.addr.type = type;
5663
5664 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5665 cmd ? cmd->sk : NULL);
5666 }
5667
5668 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5669 {
5670 BT_DBG("%s status %u", hdev->name, status);
5671
5672 /* Clear the advertising mgmt setting if we failed to re-enable it */
5673 if (status) {
5674 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5675 new_settings(hdev, NULL);
5676 }
5677 }
5678
/* Re-enable LE advertising if the mgmt advertising setting is on and
 * no LE connections remain.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Don't re-enable while LE connections still exist. */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}