]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Add Privacy flag to mgmt supported/current settings
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
/* Version of the Management interface implemented here, reported
 * through the Read Version command.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	5

/* Opcodes accepted over the management socket.  The order here defines
 * the order they are reported in the Read Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_LOAD_IRKS,
};
86
/* Events that can be delivered to management sockets; reported in the
 * Read Commands reply directly after mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
111
/* 2 seconds, expressed in jiffies.  Users of this constant are outside
 * this chunk; presumably it bounds the service cache window — confirm.
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* A controller only counts as powered when it is up and not merely
 * kept on for the automatic power-off grace period.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* A management command waiting for its HCI side effects to complete */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* this entry belongs to */
	int index;		/* controller id the command targeted */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* originating socket (reference held) */
	void *user_data;	/* opaque per-command context */
};
125
126 /* HCI to MGMT error code conversion table */
127 static u8 mgmt_status_table[] = {
128 MGMT_STATUS_SUCCESS,
129 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
130 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
131 MGMT_STATUS_FAILED, /* Hardware Failure */
132 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
133 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
134 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
135 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
136 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
137 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
138 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
139 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
140 MGMT_STATUS_BUSY, /* Command Disallowed */
141 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
142 MGMT_STATUS_REJECTED, /* Rejected Security */
143 MGMT_STATUS_REJECTED, /* Rejected Personal */
144 MGMT_STATUS_TIMEOUT, /* Host Timeout */
145 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
146 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
147 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
148 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
149 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
150 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
151 MGMT_STATUS_BUSY, /* Repeated Attempts */
152 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
153 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
154 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
155 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
156 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
157 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
158 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
159 MGMT_STATUS_FAILED, /* Unspecified Error */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
161 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
162 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
163 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
164 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
165 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
166 MGMT_STATUS_FAILED, /* Unit Link Key Used */
167 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
168 MGMT_STATUS_TIMEOUT, /* Instant Passed */
169 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
170 MGMT_STATUS_FAILED, /* Transaction Collision */
171 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
172 MGMT_STATUS_REJECTED, /* QoS Rejected */
173 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
174 MGMT_STATUS_REJECTED, /* Insufficient Security */
175 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
176 MGMT_STATUS_BUSY, /* Role Switch Pending */
177 MGMT_STATUS_FAILED, /* Slot Violation */
178 MGMT_STATUS_FAILED, /* Role Switch Failed */
179 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
180 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
181 MGMT_STATUS_BUSY, /* Host Busy Pairing */
182 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
183 MGMT_STATUS_BUSY, /* Controller Busy */
184 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
185 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
186 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
187 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
188 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
189 };
190
191 static u8 mgmt_status(u8 hci_status)
192 {
193 if (hci_status < ARRAY_SIZE(mgmt_status_table))
194 return mgmt_status_table[hci_status];
195
196 return MGMT_STATUS_FAILED;
197 }
198
199 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 {
201 struct sk_buff *skb;
202 struct mgmt_hdr *hdr;
203 struct mgmt_ev_cmd_status *ev;
204 int err;
205
206 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
207
208 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
209 if (!skb)
210 return -ENOMEM;
211
212 hdr = (void *) skb_put(skb, sizeof(*hdr));
213
214 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
215 hdr->index = cpu_to_le16(index);
216 hdr->len = cpu_to_le16(sizeof(*ev));
217
218 ev = (void *) skb_put(skb, sizeof(*ev));
219 ev->status = status;
220 ev->opcode = cpu_to_le16(cmd);
221
222 err = sock_queue_rcv_skb(sk, skb);
223 if (err < 0)
224 kfree_skb(skb);
225
226 return err;
227 }
228
229 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
230 void *rp, size_t rp_len)
231 {
232 struct sk_buff *skb;
233 struct mgmt_hdr *hdr;
234 struct mgmt_ev_cmd_complete *ev;
235 int err;
236
237 BT_DBG("sock %p", sk);
238
239 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
240 if (!skb)
241 return -ENOMEM;
242
243 hdr = (void *) skb_put(skb, sizeof(*hdr));
244
245 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
246 hdr->index = cpu_to_le16(index);
247 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
248
249 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
250 ev->opcode = cpu_to_le16(cmd);
251 ev->status = status;
252
253 if (rp)
254 memcpy(ev->data, rp, rp_len);
255
256 err = sock_queue_rcv_skb(sk, skb);
257 if (err < 0)
258 kfree_skb(skb);
259
260 return err;
261 }
262
263 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 u16 data_len)
265 {
266 struct mgmt_rp_read_version rp;
267
268 BT_DBG("sock %p", sk);
269
270 rp.version = MGMT_VERSION;
271 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
272
273 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
274 sizeof(rp));
275 }
276
277 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 u16 data_len)
279 {
280 struct mgmt_rp_read_commands *rp;
281 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
282 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 __le16 *opcode;
284 size_t rp_size;
285 int i, err;
286
287 BT_DBG("sock %p", sk);
288
289 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
290
291 rp = kmalloc(rp_size, GFP_KERNEL);
292 if (!rp)
293 return -ENOMEM;
294
295 rp->num_commands = __constant_cpu_to_le16(num_commands);
296 rp->num_events = __constant_cpu_to_le16(num_events);
297
298 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
299 put_unaligned_le16(mgmt_commands[i], opcode);
300
301 for (i = 0; i < num_events; i++, opcode++)
302 put_unaligned_le16(mgmt_events[i], opcode);
303
304 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
305 rp_size);
306 kfree(rp);
307
308 return err;
309 }
310
/* Read Index List command handler: report the ids of all usable BR/EDR
 * controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of BR/EDR controllers.
	 * HCI_SETUP/HCI_USER_CHANNEL devices are not filtered here, so
	 * the allocation below may be slightly larger than what the
	 * second pass actually fills in.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the device list read lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record the index of each usable controller */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
363
364 static u32 get_supported_settings(struct hci_dev *hdev)
365 {
366 u32 settings = 0;
367
368 settings |= MGMT_SETTING_POWERED;
369 settings |= MGMT_SETTING_PAIRABLE;
370 settings |= MGMT_SETTING_DEBUG_KEYS;
371
372 if (lmp_bredr_capable(hdev)) {
373 settings |= MGMT_SETTING_CONNECTABLE;
374 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
375 settings |= MGMT_SETTING_FAST_CONNECTABLE;
376 settings |= MGMT_SETTING_DISCOVERABLE;
377 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY;
379
380 if (lmp_ssp_capable(hdev)) {
381 settings |= MGMT_SETTING_SSP;
382 settings |= MGMT_SETTING_HS;
383 }
384
385 if (lmp_sc_capable(hdev) ||
386 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
387 settings |= MGMT_SETTING_SECURE_CONN;
388 }
389
390 if (lmp_le_capable(hdev)) {
391 settings |= MGMT_SETTING_LE;
392 settings |= MGMT_SETTING_ADVERTISING;
393 settings |= MGMT_SETTING_PRIVACY;
394 }
395
396 return settings;
397 }
398
399 static u32 get_current_settings(struct hci_dev *hdev)
400 {
401 u32 settings = 0;
402
403 if (hdev_is_powered(hdev))
404 settings |= MGMT_SETTING_POWERED;
405
406 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_CONNECTABLE;
408
409 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_FAST_CONNECTABLE;
411
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
414
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
417
418 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
419 settings |= MGMT_SETTING_BREDR;
420
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
423
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
426
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
429
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
432
433 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
434 settings |= MGMT_SETTING_ADVERTISING;
435
436 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
437 settings |= MGMT_SETTING_SECURE_CONN;
438
439 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
440 settings |= MGMT_SETTING_DEBUG_KEYS;
441
442 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
443 settings |= MGMT_SETTING_PRIVACY;
444
445 return settings;
446 }
447
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data.  Returns a pointer just past the written bytes; @data is
 * returned unchanged when nothing is written.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias is read from bytes 12-13 of the
		 * stored 128-bit representation.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Values below 0x1100 are skipped — presumably outside
		 * the service-class range; confirm against the Bluetooth
		 * assigned numbers.
		 */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID; flag the
		 * list as incomplete.
		 */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
491
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data; same scheme as create_uuid16_list().  Returns a pointer just
 * past the written bytes.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit value lives in bytes 12-15 of the stored
		 * 128-bit representation.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
524
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data; same scheme as create_uuid16_list().  Returns a pointer just
 * past the written bytes.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
557
558 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
559 {
560 struct pending_cmd *cmd;
561
562 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
563 if (cmd->opcode == opcode)
564 return cmd;
565 }
566
567 return NULL;
568 }
569
570 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
571 {
572 u8 ad_len = 0;
573 size_t name_len;
574
575 name_len = strlen(hdev->dev_name);
576 if (name_len > 0) {
577 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
578
579 if (name_len > max_len) {
580 name_len = max_len;
581 ptr[1] = EIR_NAME_SHORT;
582 } else
583 ptr[1] = EIR_NAME_COMPLETE;
584
585 ptr[0] = name_len + 1;
586
587 memcpy(ptr + 2, hdev->dev_name, name_len);
588
589 ad_len += (name_len + 2);
590 ptr += (name_len + 2);
591 }
592
593 return ad_len;
594 }
595
/* Queue an HCI command to refresh the LE scan response data, but only
 * when it differs from what the controller already has.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer for later comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
620
621 static u8 get_adv_discov_flags(struct hci_dev *hdev)
622 {
623 struct pending_cmd *cmd;
624
625 /* If there's a pending mgmt command the flags will not yet have
626 * their final values, so check for this first.
627 */
628 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
629 if (cmd) {
630 struct mgmt_mode *cp = cmd->param;
631 if (cp->val == 0x01)
632 return LE_AD_GENERAL;
633 else if (cp->val == 0x02)
634 return LE_AD_LIMITED;
635 } else {
636 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
637 return LE_AD_LIMITED;
638 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
639 return LE_AD_GENERAL;
640 }
641
642 return 0;
643 }
644
645 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
646 {
647 u8 ad_len = 0, flags = 0;
648
649 flags |= get_adv_discov_flags(hdev);
650
651 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
652 flags |= LE_AD_NO_BREDR;
653
654 if (flags) {
655 BT_DBG("adv flags 0x%02x", flags);
656
657 ptr[0] = 2;
658 ptr[1] = EIR_FLAGS;
659 ptr[2] = flags;
660
661 ad_len += 3;
662 ptr += 3;
663 }
664
665 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
666 ptr[0] = 2;
667 ptr[1] = EIR_TX_POWER;
668 ptr[2] = (u8) hdev->adv_tx_power;
669
670 ad_len += 3;
671 ptr += 3;
672 }
673
674 return ad_len;
675 }
676
/* Queue an HCI command to refresh the LE advertising data, but only
 * when it differs from what the controller already has.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full zero-padded buffer for later comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
701
/* Build the Extended Inquiry Response payload into @data: local name,
 * TX power, Device ID and the registered service class UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type: mark the name shortened if truncated */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus the name itself) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record: source, vendor, product, version — all
	 * little endian.
	 */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Append the UUID lists into whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
749
750 static void update_eir(struct hci_request *req)
751 {
752 struct hci_dev *hdev = req->hdev;
753 struct hci_cp_write_eir cp;
754
755 if (!hdev_is_powered(hdev))
756 return;
757
758 if (!lmp_ext_inq_capable(hdev))
759 return;
760
761 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
762 return;
763
764 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
765 return;
766
767 memset(&cp, 0, sizeof(cp));
768
769 create_eir(hdev, cp.data);
770
771 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
772 return;
773
774 memcpy(hdev->eir, cp.data, sizeof(cp.data));
775
776 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
777 }
778
779 static u8 get_service_classes(struct hci_dev *hdev)
780 {
781 struct bt_uuid *uuid;
782 u8 val = 0;
783
784 list_for_each_entry(uuid, &hdev->uuids, list)
785 val |= uuid->svc_hint;
786
787 return val;
788 }
789
790 static void update_class(struct hci_request *req)
791 {
792 struct hci_dev *hdev = req->hdev;
793 u8 cod[3];
794
795 BT_DBG("%s", hdev->name);
796
797 if (!hdev_is_powered(hdev))
798 return;
799
800 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
801 return;
802
803 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
804 return;
805
806 cod[0] = hdev->minor_class;
807 cod[1] = hdev->major_class;
808 cod[2] = get_service_classes(hdev);
809
810 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
811 cod[1] |= 0x20;
812
813 if (memcmp(cod, hdev->dev_class, 3) == 0)
814 return;
815
816 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
817 }
818
819 static u8 get_adv_type(struct hci_dev *hdev)
820 {
821 struct pending_cmd *cmd;
822 bool connectable;
823
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
826 */
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
828 if (cmd) {
829 struct mgmt_mode *cp = cmd->param;
830 connectable = !!cp->val;
831 } else {
832 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
833 }
834
835 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
836 }
837
/* Queue HCI commands to (re)configure and enable LE advertising */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 enable = 0x01;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 advertising interval units (0.625 ms each per the Core
	 * Specification) == 1.28 s
	 */
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	/* Connectable vs. non-connectable follows the (possibly still
	 * pending) connectable setting.
	 */
	cp.type = get_adv_type(hdev);
	cp.own_address_type = hdev->own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
855
856 static void disable_advertising(struct hci_request *req)
857 {
858 u8 enable = 0x00;
859
860 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
861 }
862
/* Delayed work: the service cache window has ended, so push the real
 * EIR data and class of device out to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do (or already done) if the cache bit was clear */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the queued commands outside the device lock */
	hci_req_run(&req, NULL);
}
883
/* One-time per-controller setup when the first management command is
 * seen for it; idempotent thanks to the HCI_MGMT test-and-set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
898
899 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
900 void *data, u16 data_len)
901 {
902 struct mgmt_rp_read_info rp;
903
904 BT_DBG("sock %p %s", sk, hdev->name);
905
906 hci_dev_lock(hdev);
907
908 memset(&rp, 0, sizeof(rp));
909
910 bacpy(&rp.bdaddr, &hdev->bdaddr);
911
912 rp.version = hdev->hci_ver;
913 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
914
915 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
916 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
917
918 memcpy(rp.dev_class, hdev->dev_class, 3);
919
920 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
921 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
922
923 hci_dev_unlock(hdev);
924
925 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
926 sizeof(rp));
927 }
928
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the entry.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
935
936 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
937 struct hci_dev *hdev, void *data,
938 u16 len)
939 {
940 struct pending_cmd *cmd;
941
942 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
943 if (!cmd)
944 return NULL;
945
946 cmd->opcode = opcode;
947 cmd->index = hdev->id;
948
949 cmd->param = kmalloc(len, GFP_KERNEL);
950 if (!cmd->param) {
951 kfree(cmd);
952 return NULL;
953 }
954
955 if (data)
956 memcpy(cmd->param, data, len);
957
958 cmd->sk = sk;
959 sock_hold(sk);
960
961 list_add(&cmd->list, &hdev->mgmt_pending);
962
963 return cmd;
964 }
965
966 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
967 void (*cb)(struct pending_cmd *cmd,
968 void *data),
969 void *data)
970 {
971 struct pending_cmd *cmd, *tmp;
972
973 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
974 if (opcode > 0 && cmd->opcode != opcode)
975 continue;
976
977 cb(cmd, data);
978 }
979 }
980
/* Unlink a pending command from its list and release it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
986
987 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
988 {
989 __le32 settings = cpu_to_le32(get_current_settings(hdev));
990
991 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
992 sizeof(settings));
993 }
994
/* Set Powered command handler: power the controller up or down.  The
 * reply is deferred via a pending command and sent once the transition
 * completes, unless the device is already in the requested state.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* During the auto-off grace period the device is actually still
	 * up: cancel the deferred power-off and, when powering on, just
	 * confirm the already-powered state.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* NOTE(review): the return value of
			 * mgmt_pending_add() is not checked here; on
			 * allocation failure no reply would be queued.
			 */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The actual transition runs asynchronously on the request
	 * workqueue; the reply is sent from the completion path.
	 */
	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1049
1050 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1051 struct sock *skip_sk)
1052 {
1053 struct sk_buff *skb;
1054 struct mgmt_hdr *hdr;
1055
1056 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1057 if (!skb)
1058 return -ENOMEM;
1059
1060 hdr = (void *) skb_put(skb, sizeof(*hdr));
1061 hdr->opcode = cpu_to_le16(event);
1062 if (hdev)
1063 hdr->index = cpu_to_le16(hdev->id);
1064 else
1065 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1066 hdr->len = cpu_to_le16(data_len);
1067
1068 if (data)
1069 memcpy(skb_put(skb, data_len), data, data_len);
1070
1071 /* Time stamp */
1072 __net_timestamp(skb);
1073
1074 hci_send_to_control(skb, skip_sk);
1075 kfree_skb(skb);
1076
1077 return 0;
1078 }
1079
1080 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1081 {
1082 __le32 ev;
1083
1084 ev = cpu_to_le32(get_current_settings(hdev));
1085
1086 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1087 }
1088
/* Context shared by mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first socket replied to (extra ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1094
/* mgmt_pending_foreach() callback: reply to a pending command with the
 * current settings and free the entry.  The first socket seen is saved
 * in the lookup with an extra reference — presumably so the caller can
 * skip it when broadcasting a matching event; confirm against callers.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* Entry already unlinked above, so free directly */
	mgmt_pending_free(cmd);
}
1110
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status passed via @data and remove the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1118
1119 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1120 {
1121 if (!lmp_bredr_capable(hdev))
1122 return MGMT_STATUS_NOT_SUPPORTED;
1123 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1124 return MGMT_STATUS_REJECTED;
1125 else
1126 return MGMT_STATUS_SUCCESS;
1127 }
1128
1129 static u8 mgmt_le_support(struct hci_dev *hdev)
1130 {
1131 if (!lmp_le_capable(hdev))
1132 return MGMT_STATUS_NOT_SUPPORTED;
1133 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1134 return MGMT_STATUS_REJECTED;
1135 else
1136 return MGMT_STATUS_SUCCESS;
1137 }
1138
/* HCI completion handler for the Set Discoverable request: update the
 * flags, reply to the pending command and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag on failure */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the deferred discoverable-off work if a timeout
		 * was requested.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	/* Only broadcast New Settings when the flag actually flipped */
	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1195
/* Handler for the MGMT_OP_SET_DISCOVERABLE command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is an auto-disable timeout in seconds
 * (mandatory for limited, forbidden when disabling).
 *
 * Powered-off devices only get their flags updated; powered devices get
 * an HCI request (IAC LAP + scan enable for BR/EDR, advertising data
 * update for LE-only) whose completion is handled in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable needs at least one transport enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be armed while powered on */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share the scan-enable
	 * state, so only one of them may be in flight at a time.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1360
/* Queue page-scan parameter updates onto @req to enable or disable
 * fast-connectable mode. Fast connectable uses interlaced page scan
 * with a 160 msec interval; normal mode uses standard page scan at the
 * default 1.28 sec interval. Commands are only queued for parameters
 * that actually differ from the controller's current values.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	/* Page scan only exists on the BR/EDR transport */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require at least 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	/* Skip the HCI commands when the controller already has the
	 * desired parameters (avoids no-op HCI traffic).
	 */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1395
/* Request-complete callback for set_connectable(). Syncs the
 * HCI_CONNECTABLE flag with the HCI outcome, answers the pending mgmt
 * command and emits New Settings when the flag actually changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone; nothing to do then */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1433
1434 static int set_connectable_update_settings(struct hci_dev *hdev,
1435 struct sock *sk, u8 val)
1436 {
1437 bool changed = false;
1438 int err;
1439
1440 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1441 changed = true;
1442
1443 if (val) {
1444 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1445 } else {
1446 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1447 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1448 }
1449
1450 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1451 if (err < 0)
1452 return err;
1453
1454 if (changed)
1455 return new_settings(hdev, sk);
1456
1457 return 0;
1458 }
1459
/* Handler for the MGMT_OP_SET_CONNECTABLE command.
 *
 * Powered-off devices take the flag-only path via
 * set_connectable_update_settings(). Powered devices get an HCI request
 * that updates page scan (BR/EDR) or advertising data/state (LE-only);
 * the completion is handled in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Connectable requires at least one enabled transport */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flags only, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against other scan-enable affecting commands */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable also ends any pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable state,
	 * but only when no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means the request ended up empty (nothing to
		 * send), so fall back to the flag-only update path.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1554
1555 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 u16 len)
1557 {
1558 struct mgmt_mode *cp = data;
1559 bool changed;
1560 int err;
1561
1562 BT_DBG("request for %s", hdev->name);
1563
1564 if (cp->val != 0x00 && cp->val != 0x01)
1565 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
1567
1568 hci_dev_lock(hdev);
1569
1570 if (cp->val)
1571 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1572 else
1573 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1574
1575 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1576 if (err < 0)
1577 goto unlock;
1578
1579 if (changed)
1580 err = new_settings(hdev, sk);
1581
1582 unlock:
1583 hci_dev_unlock(hdev);
1584 return err;
1585 }
1586
/* Handler for the MGMT_OP_SET_LINK_SECURITY command.
 *
 * Powered off: just toggle the HCI_LINK_SECURITY flag. Powered on: send
 * HCI Write Authentication Enable and let the event handler finish the
 * pending command. BR/EDR support is mandatory.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1656
/* Handler for the MGMT_OP_SET_SSP command.
 *
 * Powered off: toggle HCI_SSP_ENABLED directly; disabling SSP also
 * takes HS down with it since High Speed depends on SSP. Powered on:
 * send HCI Write Simple Pairing Mode and finish in the event handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; "changed" must
			 * be true if either flag was actually cleared.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* SSP and HS commands are interdependent; allow only one pending */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply without HCI traffic */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1734
/* Handler for the MGMT_OP_SET_HS (High Speed) command. HS is a pure
 * host-side flag layered on SSP, so no HCI commands are needed; SSP
 * must be enabled first. Disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Turning HS off is only allowed while powered down */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1785
/* Request-complete callback for set_le(). Answers all pending Set LE
 * commands (with an error status, or with the updated settings) and,
 * when LE ended up enabled, refreshes the advertising and scan response
 * data on the controller.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1823
/* Handler for the MGMT_OP_SET_LE command.
 *
 * Flag-only path when powered off or when the controller's host LE
 * support already matches the request; otherwise send HCI Write LE Host
 * Supported (disabling advertising first if LE is being turned off) and
 * finish in le_enable_complete(). LE-only controllers may not toggle LE.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot survive LE being disabled */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands touching LE/advertising state */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Turn off advertising before disabling LE support */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1912
1913 /* This is a helper function to test for pending mgmt commands that can
1914 * cause CoD or EIR HCI commands. We can only allow one such pending
1915 * mgmt command at a time since otherwise we cannot easily track what
1916 * the current values are, will be, and based on that calculate if a new
1917 * HCI command needs to be sent and if yes with what value.
1918 */
1919 static bool pending_eir_or_class(struct hci_dev *hdev)
1920 {
1921 struct pending_cmd *cmd;
1922
1923 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1924 switch (cmd->opcode) {
1925 case MGMT_OP_ADD_UUID:
1926 case MGMT_OP_REMOVE_UUID:
1927 case MGMT_OP_SET_DEV_CLASS:
1928 case MGMT_OP_SET_POWERED:
1929 return true;
1930 }
1931 }
1932
1933 return false;
1934 }
1935
/* The 128-bit Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * stored in little-endian byte order. 16/32-bit UUIDs are built on top
 * of this base; get_uuid_size() compares against the low 12 bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1940
1941 static u8 get_uuid_size(const u8 *uuid)
1942 {
1943 u32 val;
1944
1945 if (memcmp(uuid, bluetooth_base_uuid, 12))
1946 return 128;
1947
1948 val = get_unaligned_le32(&uuid[12]);
1949 if (val > 0xffff)
1950 return 32;
1951
1952 return 16;
1953 }
1954
1955 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1956 {
1957 struct pending_cmd *cmd;
1958
1959 hci_dev_lock(hdev);
1960
1961 cmd = mgmt_pending_find(mgmt_op, hdev);
1962 if (!cmd)
1963 goto unlock;
1964
1965 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1966 hdev->dev_class, 3);
1967
1968 mgmt_pending_remove(cmd);
1969
1970 unlock:
1971 hci_dev_unlock(hdev);
1972 }
1973
/* Request-complete callback for add_uuid(); delegates to the common
 * class-operation completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1980
/* Handler for the MGMT_OP_ADD_UUID command. Appends the UUID to the
 * device's service list and queues class-of-device and EIR updates; the
 * reply is sent from add_uuid_complete() unless the request turns out
 * to be empty (-ENODATA), in which case it is answered immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: nothing needed to be sent to the controller,
		 * so complete the command right away.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2038
2039 static bool enable_service_cache(struct hci_dev *hdev)
2040 {
2041 if (!hdev_is_powered(hdev))
2042 return false;
2043
2044 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2045 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2046 CACHE_TIMEOUT);
2047 return true;
2048 }
2049
2050 return false;
2051 }
2052
/* Request-complete callback for remove_uuid(); delegates to the common
 * class-operation completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2059
/* Handler for the MGMT_OP_REMOVE_UUID command. An all-zero UUID clears
 * the whole list (possibly arming the service cache instead of touching
 * the controller); otherwise every matching entry is removed. Class and
 * EIR updates are queued and completed via remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID is a wildcard: remove everything */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed, the controller update is
		 * deferred, so complete the command immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI traffic was needed; reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2137
/* Request-complete callback for set_dev_class(); delegates to the
 * common class-operation completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2144
/* Handler for the MGMT_OP_SET_DEV_CLASS command. Stores the new
 * major/minor class and, when powered, queues a class-of-device update
 * (flushing the service cache first if armed). Completed in
 * set_class_complete() unless the request is empty (-ENODATA).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while waiting for the cache work to
		 * finish, since that work takes the same lock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: no HCI traffic was needed; reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2215
2216 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2217 u16 len)
2218 {
2219 struct mgmt_cp_load_link_keys *cp = data;
2220 u16 key_count, expected_len;
2221 bool changed;
2222 int i;
2223
2224 BT_DBG("request for %s", hdev->name);
2225
2226 if (!lmp_bredr_capable(hdev))
2227 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2228 MGMT_STATUS_NOT_SUPPORTED);
2229
2230 key_count = __le16_to_cpu(cp->key_count);
2231
2232 expected_len = sizeof(*cp) + key_count *
2233 sizeof(struct mgmt_link_key_info);
2234 if (expected_len != len) {
2235 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2236 len, expected_len);
2237 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2238 MGMT_STATUS_INVALID_PARAMS);
2239 }
2240
2241 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2242 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2243 MGMT_STATUS_INVALID_PARAMS);
2244
2245 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2246 key_count);
2247
2248 for (i = 0; i < key_count; i++) {
2249 struct mgmt_link_key_info *key = &cp->keys[i];
2250
2251 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2252 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2253 MGMT_STATUS_INVALID_PARAMS);
2254 }
2255
2256 hci_dev_lock(hdev);
2257
2258 hci_link_keys_clear(hdev);
2259
2260 if (cp->debug_keys)
2261 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2262 else
2263 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2264
2265 if (changed)
2266 new_settings(hdev, NULL);
2267
2268 for (i = 0; i < key_count; i++) {
2269 struct mgmt_link_key_info *key = &cp->keys[i];
2270
2271 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2272 key->type, key->pin_len);
2273 }
2274
2275 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2276
2277 hci_dev_unlock(hdev);
2278
2279 return 0;
2280 }
2281
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt sockets except @skip_sk (the socket that issued the unpair).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2293
2294 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2295 u16 len)
2296 {
2297 struct mgmt_cp_unpair_device *cp = data;
2298 struct mgmt_rp_unpair_device rp;
2299 struct hci_cp_disconnect dc;
2300 struct pending_cmd *cmd;
2301 struct hci_conn *conn;
2302 int err;
2303
2304 memset(&rp, 0, sizeof(rp));
2305 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2306 rp.addr.type = cp->addr.type;
2307
2308 if (!bdaddr_type_is_valid(cp->addr.type))
2309 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2310 MGMT_STATUS_INVALID_PARAMS,
2311 &rp, sizeof(rp));
2312
2313 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2314 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2315 MGMT_STATUS_INVALID_PARAMS,
2316 &rp, sizeof(rp));
2317
2318 hci_dev_lock(hdev);
2319
2320 if (!hdev_is_powered(hdev)) {
2321 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2322 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2323 goto unlock;
2324 }
2325
2326 if (cp->addr.type == BDADDR_BREDR) {
2327 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2328 } else {
2329 u8 addr_type;
2330
2331 if (cp->addr.type == BDADDR_LE_PUBLIC)
2332 addr_type = ADDR_LE_DEV_PUBLIC;
2333 else
2334 addr_type = ADDR_LE_DEV_RANDOM;
2335
2336 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2337
2338 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2339 }
2340
2341 if (err < 0) {
2342 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2343 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2344 goto unlock;
2345 }
2346
2347 if (cp->disconnect) {
2348 if (cp->addr.type == BDADDR_BREDR)
2349 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2350 &cp->addr.bdaddr);
2351 else
2352 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2353 &cp->addr.bdaddr);
2354 } else {
2355 conn = NULL;
2356 }
2357
2358 if (!conn) {
2359 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2360 &rp, sizeof(rp));
2361 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2362 goto unlock;
2363 }
2364
2365 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2366 sizeof(*cp));
2367 if (!cmd) {
2368 err = -ENOMEM;
2369 goto unlock;
2370 }
2371
2372 dc.handle = cpu_to_le16(conn->handle);
2373 dc.reason = 0x13; /* Remote User Terminated Connection */
2374 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2375 if (err < 0)
2376 mgmt_pending_remove(cmd);
2377
2378 unlock:
2379 hci_dev_unlock(hdev);
2380 return err;
2381 }
2382
/* Handler for the MGMT_OP_DISCONNECT command. Looks up the ACL or LE
 * connection for the given address and sends HCI Disconnect with reason
 * "Remote User Terminated Connection"; the pending command is completed
 * from the disconnect event handler.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* Connections still being set up or already torn down cannot be
	 * disconnected through this command.
	 */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2447
2448 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2449 {
2450 switch (link_type) {
2451 case LE_LINK:
2452 switch (addr_type) {
2453 case ADDR_LE_DEV_PUBLIC:
2454 return BDADDR_LE_PUBLIC;
2455
2456 default:
2457 /* Fallback to LE Random address type */
2458 return BDADDR_LE_RANDOM;
2459 }
2460
2461 default:
2462 /* Fallback to BR/EDR type */
2463 return BDADDR_BREDR;
2464 }
2465 }
2466
2467 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2468 u16 data_len)
2469 {
2470 struct mgmt_rp_get_connections *rp;
2471 struct hci_conn *c;
2472 size_t rp_len;
2473 int err;
2474 u16 i;
2475
2476 BT_DBG("");
2477
2478 hci_dev_lock(hdev);
2479
2480 if (!hdev_is_powered(hdev)) {
2481 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2482 MGMT_STATUS_NOT_POWERED);
2483 goto unlock;
2484 }
2485
2486 i = 0;
2487 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2488 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2489 i++;
2490 }
2491
2492 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2493 rp = kmalloc(rp_len, GFP_KERNEL);
2494 if (!rp) {
2495 err = -ENOMEM;
2496 goto unlock;
2497 }
2498
2499 i = 0;
2500 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2501 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2502 continue;
2503 bacpy(&rp->addr[i].bdaddr, &c->dst);
2504 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2505 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2506 continue;
2507 i++;
2508 }
2509
2510 rp->conn_count = cpu_to_le16(i);
2511
2512 /* Recalculate length in case of filtered SCO connections, etc */
2513 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2514
2515 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2516 rp_len);
2517
2518 kfree(rp);
2519
2520 unlock:
2521 hci_dev_unlock(hdev);
2522 return err;
2523 }
2524
2525 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2526 struct mgmt_cp_pin_code_neg_reply *cp)
2527 {
2528 struct pending_cmd *cmd;
2529 int err;
2530
2531 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2532 sizeof(*cp));
2533 if (!cmd)
2534 return -ENOMEM;
2535
2536 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2537 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2538 if (err < 0)
2539 mgmt_pending_remove(cmd);
2540
2541 return err;
2542 }
2543
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code for
 * an ongoing BR/EDR pairing to the controller.
 *
 * Returns 0 or a negative errno; the mgmt status is sent to the socket.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL link. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN: send a negative
	 * reply to the controller and INVALID_PARAMS to userspace.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2603
2604 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2605 u16 len)
2606 {
2607 struct mgmt_cp_set_io_capability *cp = data;
2608
2609 BT_DBG("");
2610
2611 hci_dev_lock(hdev);
2612
2613 hdev->io_capability = cp->io_capability;
2614
2615 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2616 hdev->io_capability);
2617
2618 hci_dev_unlock(hdev);
2619
2620 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2621 0);
2622 }
2623
2624 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2625 {
2626 struct hci_dev *hdev = conn->hdev;
2627 struct pending_cmd *cmd;
2628
2629 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2630 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2631 continue;
2632
2633 if (cmd->user_data != conn)
2634 continue;
2635
2636 return cmd;
2637 }
2638
2639 return NULL;
2640 }
2641
/* Finish a pending MGMT_OP_PAIR_DEVICE command: report the result to
 * userspace, detach the pairing callbacks, drop the connection
 * reference taken by pair_device() and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2662
2663 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2664 {
2665 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2666 struct pending_cmd *cmd;
2667
2668 cmd = find_pairing(conn);
2669 if (cmd)
2670 pairing_complete(cmd, status);
2671 }
2672
2673 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2674 {
2675 struct pending_cmd *cmd;
2676
2677 BT_DBG("status %u", status);
2678
2679 cmd = find_pairing(conn);
2680 if (!cmd)
2681 BT_DBG("Unable to find a pending command");
2682 else
2683 pairing_complete(cmd, mgmt_status(status));
2684 }
2685
2686 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2687 {
2688 struct pending_cmd *cmd;
2689
2690 BT_DBG("status %u", status);
2691
2692 if (!status)
2693 return;
2694
2695 cmd = find_pairing(conn);
2696 if (!cmd)
2697 BT_DBG("Unable to find a pending command");
2698 else
2699 pairing_complete(cmd, mgmt_status(status));
2700 }
2701
/* Handle MGMT_OP_PAIR_DEVICE: connect to the given address (ACL for
 * BR/EDR, LE otherwise) and start pairing.  The command completes
 * asynchronously through the pairing callbacks installed below, or
 * immediately if the link is already connected and secure enough.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection,
	 * so only plain dedicated bonding is requested in that case.
	 */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL callback means another pairing attempt is already
	 * using this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete right away. */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2796
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending
 * MGMT_OP_PAIR_DEVICE command if its target matches the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the connection the pairing is for. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Fail the pending pair command; this also frees cmd. */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2838
/* Common handler for the user confirmation / passkey (negative) reply
 * mgmt commands.  LE responses are routed to SMP and answered
 * synchronously; BR/EDR responses are forwarded to the controller as
 * the given HCI command (with the passkey attached when hci_op is
 * HCI_OP_USER_PASSKEY_REPLY) and complete asynchronously.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2908
2909 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2910 void *data, u16 len)
2911 {
2912 struct mgmt_cp_pin_code_neg_reply *cp = data;
2913
2914 BT_DBG("");
2915
2916 return user_pairing_resp(sk, hdev, &cp->addr,
2917 MGMT_OP_PIN_CODE_NEG_REPLY,
2918 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2919 }
2920
2921 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2922 u16 len)
2923 {
2924 struct mgmt_cp_user_confirm_reply *cp = data;
2925
2926 BT_DBG("");
2927
2928 if (len != sizeof(*cp))
2929 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2930 MGMT_STATUS_INVALID_PARAMS);
2931
2932 return user_pairing_resp(sk, hdev, &cp->addr,
2933 MGMT_OP_USER_CONFIRM_REPLY,
2934 HCI_OP_USER_CONFIRM_REPLY, 0);
2935 }
2936
2937 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2938 void *data, u16 len)
2939 {
2940 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2941
2942 BT_DBG("");
2943
2944 return user_pairing_resp(sk, hdev, &cp->addr,
2945 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2946 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2947 }
2948
2949 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2950 u16 len)
2951 {
2952 struct mgmt_cp_user_passkey_reply *cp = data;
2953
2954 BT_DBG("");
2955
2956 return user_pairing_resp(sk, hdev, &cp->addr,
2957 MGMT_OP_USER_PASSKEY_REPLY,
2958 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2959 }
2960
2961 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2962 void *data, u16 len)
2963 {
2964 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2965
2966 BT_DBG("");
2967
2968 return user_pairing_resp(sk, hdev, &cp->addr,
2969 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2970 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2971 }
2972
2973 static void update_name(struct hci_request *req)
2974 {
2975 struct hci_dev *hdev = req->hdev;
2976 struct hci_cp_write_local_name cp;
2977
2978 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2979
2980 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2981 }
2982
/* Request callback for the HCI transaction started by set_local_name():
 * translate the HCI status into the mgmt response for the pending
 * MGMT_OP_SET_LOCAL_NAME command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		/* Echo the requested name back on success. */
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3010
/* Handle MGMT_OP_SET_LOCAL_NAME: update the stored device name and
 * short name.  When powered, the name is also written to the
 * controller (name, EIR and LE scan response data as applicable) and
 * the command completes via set_name_complete(); otherwise only the
 * stored copies change and the result is reported immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets about the change. */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3079
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for local
 * out-of-band association data.  When Secure Connections is enabled
 * the extended read command is issued instead.  The command completes
 * asynchronously when the controller responds.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is an SSP feature. */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3127
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received from a remote device.  The command length selects between
 * the legacy format and the extended format that additionally carries
 * the hash256/randomizer256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3175
3176 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3177 void *data, u16 len)
3178 {
3179 struct mgmt_cp_remove_remote_oob_data *cp = data;
3180 u8 status;
3181 int err;
3182
3183 BT_DBG("%s", hdev->name);
3184
3185 hci_dev_lock(hdev);
3186
3187 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3188 if (err < 0)
3189 status = MGMT_STATUS_INVALID_PARAMS;
3190 else
3191 status = MGMT_STATUS_SUCCESS;
3192
3193 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3194 status, &cp->addr, sizeof(cp->addr));
3195
3196 hci_dev_unlock(hdev);
3197 return err;
3198 }
3199
3200 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3201 {
3202 struct pending_cmd *cmd;
3203 u8 type;
3204 int err;
3205
3206 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3207
3208 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3209 if (!cmd)
3210 return -ENOENT;
3211
3212 type = hdev->discovery.type;
3213
3214 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3215 &type, sizeof(type));
3216 mgmt_pending_remove(cmd);
3217
3218 return err;
3219 }
3220
/* Request callback for the HCI transaction built by start_discovery().
 * On success, marks discovery as active and, for LE-based discovery,
 * schedules the deferred scan-disable work.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   DISCOV_INTERLEAVED_TIMEOUT);
		break;

	case DISCOV_TYPE_BREDR:
		/* Inquiry length was set in the HCI command; no timer
		 * needs to be armed here.
		 */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}
}
3254
/* Handle MGMT_OP_START_DISCOVERY: start BR/EDR inquiry, LE scanning or
 * interleaved discovery depending on the requested type.  The command
 * completes asynchronously via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and mgmt discovery are mutually exclusive. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start from a clean cache so old results don't leak
		 * into the new discovery session.
		 */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning is rejected while advertising is active. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Use an active scan so that scan response data is
		 * received as well.
		 */
		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3390
3391 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3392 {
3393 struct pending_cmd *cmd;
3394 int err;
3395
3396 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3397 if (!cmd)
3398 return -ENOENT;
3399
3400 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3401 &hdev->discovery.type, sizeof(hdev->discovery.type));
3402 mgmt_pending_remove(cmd);
3403
3404 return err;
3405 }
3406
3407 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3408 {
3409 BT_DBG("status %d", status);
3410
3411 hci_dev_lock(hdev);
3412
3413 if (status) {
3414 mgmt_stop_discovery_failed(hdev, status);
3415 goto unlock;
3416 }
3417
3418 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3419
3420 unlock:
3421 hci_dev_unlock(hdev);
3422 }
3423
/* Handle MGMT_OP_STOP_DISCOVERY: cancel an ongoing inquiry/LE scan or
 * a pending remote name resolution, depending on the discovery state.
 * Completes asynchronously via stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery actually running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* BR/EDR inquiry is running: cancel it. */
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan is running: stop the disable timer
			 * and turn scanning off explicitly.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			/* Nothing being resolved: finish synchronously. */
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3515
/* Handle MGMT_OP_CONFIRM_NAME: userspace reports whether it already
 * knows the remote name for an inquiry result, which decides whether
 * name resolution is still needed for that cache entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: drop the entry from the resolve
		 * list, no name request is needed.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3555
3556 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3557 u16 len)
3558 {
3559 struct mgmt_cp_block_device *cp = data;
3560 u8 status;
3561 int err;
3562
3563 BT_DBG("%s", hdev->name);
3564
3565 if (!bdaddr_type_is_valid(cp->addr.type))
3566 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3567 MGMT_STATUS_INVALID_PARAMS,
3568 &cp->addr, sizeof(cp->addr));
3569
3570 hci_dev_lock(hdev);
3571
3572 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3573 if (err < 0)
3574 status = MGMT_STATUS_FAILED;
3575 else
3576 status = MGMT_STATUS_SUCCESS;
3577
3578 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3579 &cp->addr, sizeof(cp->addr));
3580
3581 hci_dev_unlock(hdev);
3582
3583 return err;
3584 }
3585
3586 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3587 u16 len)
3588 {
3589 struct mgmt_cp_unblock_device *cp = data;
3590 u8 status;
3591 int err;
3592
3593 BT_DBG("%s", hdev->name);
3594
3595 if (!bdaddr_type_is_valid(cp->addr.type))
3596 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3597 MGMT_STATUS_INVALID_PARAMS,
3598 &cp->addr, sizeof(cp->addr));
3599
3600 hci_dev_lock(hdev);
3601
3602 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3603 if (err < 0)
3604 status = MGMT_STATUS_INVALID_PARAMS;
3605 else
3606 status = MGMT_STATUS_SUCCESS;
3607
3608 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3609 &cp->addr, sizeof(cp->addr));
3610
3611 hci_dev_unlock(hdev);
3612
3613 return err;
3614 }
3615
3616 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3617 u16 len)
3618 {
3619 struct mgmt_cp_set_device_id *cp = data;
3620 struct hci_request req;
3621 int err;
3622 __u16 source;
3623
3624 BT_DBG("%s", hdev->name);
3625
3626 source = __le16_to_cpu(cp->source);
3627
3628 if (source > 0x0002)
3629 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3630 MGMT_STATUS_INVALID_PARAMS);
3631
3632 hci_dev_lock(hdev);
3633
3634 hdev->devid_source = source;
3635 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3636 hdev->devid_product = __le16_to_cpu(cp->product);
3637 hdev->devid_version = __le16_to_cpu(cp->version);
3638
3639 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3640
3641 hci_req_init(&req, hdev);
3642 update_eir(&req);
3643 hci_req_run(&req, NULL);
3644
3645 hci_dev_unlock(hdev);
3646
3647 return err;
3648 }
3649
3650 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3651 {
3652 struct cmd_lookup match = { NULL, hdev };
3653
3654 if (status) {
3655 u8 mgmt_err = mgmt_status(status);
3656
3657 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3658 cmd_status_rsp, &mgmt_err);
3659 return;
3660 }
3661
3662 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3663 &match);
3664
3665 new_settings(hdev, match.sk);
3666
3667 if (match.sk)
3668 sock_put(match.sk);
3669 }
3670
/* Handle MGMT_OP_SET_ADVERTISING: enable or disable LE advertising.
 *
 * When the adapter is powered off, the flag already matches, or an LE
 * connection exists, only the HCI_ADVERTISING flag is toggled and a
 * response is sent directly; otherwise an HCI request is issued and
 * completion is reported through set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* LE must be supported and enabled for this command. */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast new settings when the flag actually
		 * changed.
		 */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising or LE state change may be in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	/* The pending command is resolved in set_advertising_complete();
	 * remove it here only if the request never got started.
	 */
	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3748
3749 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3750 void *data, u16 len)
3751 {
3752 struct mgmt_cp_set_static_address *cp = data;
3753 int err;
3754
3755 BT_DBG("%s", hdev->name);
3756
3757 if (!lmp_le_capable(hdev))
3758 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3759 MGMT_STATUS_NOT_SUPPORTED);
3760
3761 if (hdev_is_powered(hdev))
3762 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3763 MGMT_STATUS_REJECTED);
3764
3765 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3766 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3767 return cmd_status(sk, hdev->id,
3768 MGMT_OP_SET_STATIC_ADDRESS,
3769 MGMT_STATUS_INVALID_PARAMS);
3770
3771 /* Two most significant bits shall be set */
3772 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3773 return cmd_status(sk, hdev->id,
3774 MGMT_OP_SET_STATIC_ADDRESS,
3775 MGMT_STATUS_INVALID_PARAMS);
3776 }
3777
3778 hci_dev_lock(hdev);
3779
3780 bacpy(&hdev->static_addr, &cp->bdaddr);
3781
3782 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3783
3784 hci_dev_unlock(hdev);
3785
3786 return err;
3787 }
3788
3789 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3790 void *data, u16 len)
3791 {
3792 struct mgmt_cp_set_scan_params *cp = data;
3793 __u16 interval, window;
3794 int err;
3795
3796 BT_DBG("%s", hdev->name);
3797
3798 if (!lmp_le_capable(hdev))
3799 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3800 MGMT_STATUS_NOT_SUPPORTED);
3801
3802 interval = __le16_to_cpu(cp->interval);
3803
3804 if (interval < 0x0004 || interval > 0x4000)
3805 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3806 MGMT_STATUS_INVALID_PARAMS);
3807
3808 window = __le16_to_cpu(cp->window);
3809
3810 if (window < 0x0004 || window > 0x4000)
3811 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3812 MGMT_STATUS_INVALID_PARAMS);
3813
3814 if (window > interval)
3815 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3816 MGMT_STATUS_INVALID_PARAMS);
3817
3818 hci_dev_lock(hdev);
3819
3820 hdev->le_scan_interval = interval;
3821 hdev->le_scan_window = window;
3822
3823 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3824
3825 hci_dev_unlock(hdev);
3826
3827 return err;
3828 }
3829
3830 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3831 {
3832 struct pending_cmd *cmd;
3833
3834 BT_DBG("status 0x%02x", status);
3835
3836 hci_dev_lock(hdev);
3837
3838 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3839 if (!cmd)
3840 goto unlock;
3841
3842 if (status) {
3843 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3844 mgmt_status(status));
3845 } else {
3846 struct mgmt_mode *cp = cmd->param;
3847
3848 if (cp->val)
3849 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3850 else
3851 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3852
3853 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3854 new_settings(hdev, cmd->sk);
3855 }
3856
3857 mgmt_pending_remove(cmd);
3858
3859 unlock:
3860 hci_dev_unlock(hdev);
3861 }
3862
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: switch the page scan parameters
 * between normal and fast-connect values.
 *
 * Requires BR/EDR enabled, controller version >= 1.2, the adapter to be
 * powered and connectable. The flag itself is only updated from
 * fast_connectable_complete() once the controller accepts the change.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one fast-connectable change may be in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do when the flag already matches the request. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3927
3928 static void set_bredr_scan(struct hci_request *req)
3929 {
3930 struct hci_dev *hdev = req->hdev;
3931 u8 scan = 0;
3932
3933 /* Ensure that fast connectable is disabled. This function will
3934 * not do anything if the page scan parameters are already what
3935 * they should be.
3936 */
3937 write_fast_connectable(req, false);
3938
3939 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3940 scan |= SCAN_PAGE;
3941 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3942 scan |= SCAN_INQUIRY;
3943
3944 if (scan)
3945 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3946 }
3947
3948 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3949 {
3950 struct pending_cmd *cmd;
3951
3952 BT_DBG("status 0x%02x", status);
3953
3954 hci_dev_lock(hdev);
3955
3956 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3957 if (!cmd)
3958 goto unlock;
3959
3960 if (status) {
3961 u8 mgmt_err = mgmt_status(status);
3962
3963 /* We need to restore the flag if related HCI commands
3964 * failed.
3965 */
3966 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3967
3968 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3969 } else {
3970 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3971 new_settings(hdev, cmd->sk);
3972 }
3973
3974 mgmt_pending_remove(cmd);
3975
3976 unlock:
3977 hci_dev_unlock(hdev);
3978 }
3979
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller.
 *
 * Disabling is only allowed while powered off; while powered, only
 * enabling is accepted, which flips HCI_BREDR_ENABLED optimistically
 * and issues the scan/advertising updates (rolled back on failure in
 * set_bredr_complete()).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; BR/EDR-only operation is the default. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Dropping BR/EDR also clears every flag that only makes
		 * sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4069
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections support.
 *
 * Accepted values: 0x00 = off, 0x01 = on, 0x02 = SC-only mode. While
 * powered off only the flags are updated; otherwise the host support
 * setting is written to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows testing SC on controllers that do not
	 * report the feature.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off no HCI traffic is needed; just keep the
	 * flags in sync and respond directly.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to write when both the enabled and SC-only states
	 * already match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): HCI_SC_ONLY is updated optimistically once the
	 * command was queued, before its completion — presumably the
	 * command-complete handler settles HCI_SC_ENABLED; confirm
	 * against the corresponding mgmt_sc_enable_complete path.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4157
4158 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4159 void *data, u16 len)
4160 {
4161 struct mgmt_mode *cp = data;
4162 bool changed;
4163 int err;
4164
4165 BT_DBG("request for %s", hdev->name);
4166
4167 if (cp->val != 0x00 && cp->val != 0x01)
4168 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4169 MGMT_STATUS_INVALID_PARAMS);
4170
4171 hci_dev_lock(hdev);
4172
4173 if (cp->val)
4174 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4175 else
4176 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4177
4178 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4179 if (err < 0)
4180 goto unlock;
4181
4182 if (changed)
4183 err = new_settings(hdev, sk);
4184
4185 unlock:
4186 hci_dev_unlock(hdev);
4187 return err;
4188 }
4189
4190 static bool irk_is_valid(struct mgmt_irk_info *irk)
4191 {
4192 switch (irk->addr.type) {
4193 case BDADDR_LE_PUBLIC:
4194 return true;
4195
4196 case BDADDR_LE_RANDOM:
4197 /* Two most significant bits shall be set */
4198 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4199 return false;
4200 return true;
4201 }
4202
4203 return false;
4204 }
4205
4206 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4207 u16 len)
4208 {
4209 struct mgmt_cp_load_irks *cp = cp_data;
4210 u16 irk_count, expected_len;
4211 int i, err;
4212
4213 BT_DBG("request for %s", hdev->name);
4214
4215 if (!lmp_le_capable(hdev))
4216 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4217 MGMT_STATUS_NOT_SUPPORTED);
4218
4219 irk_count = __le16_to_cpu(cp->irk_count);
4220
4221 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4222 if (expected_len != len) {
4223 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4224 len, expected_len);
4225 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4226 MGMT_STATUS_INVALID_PARAMS);
4227 }
4228
4229 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4230
4231 for (i = 0; i < irk_count; i++) {
4232 struct mgmt_irk_info *key = &cp->irks[i];
4233
4234 if (!irk_is_valid(key))
4235 return cmd_status(sk, hdev->id,
4236 MGMT_OP_LOAD_IRKS,
4237 MGMT_STATUS_INVALID_PARAMS);
4238 }
4239
4240 hci_dev_lock(hdev);
4241
4242 hci_smp_irks_clear(hdev);
4243
4244 for (i = 0; i < irk_count; i++) {
4245 struct mgmt_irk_info *irk = &cp->irks[i];
4246 u8 addr_type;
4247
4248 if (irk->addr.type == BDADDR_LE_PUBLIC)
4249 addr_type = ADDR_LE_DEV_PUBLIC;
4250 else
4251 addr_type = ADDR_LE_DEV_RANDOM;
4252
4253 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4254 BDADDR_ANY);
4255 }
4256
4257 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4258
4259 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4260
4261 hci_dev_unlock(hdev);
4262
4263 return err;
4264 }
4265
4266 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4267 {
4268 if (key->master != 0x00 && key->master != 0x01)
4269 return false;
4270
4271 switch (key->addr.type) {
4272 case BDADDR_LE_PUBLIC:
4273 return true;
4274
4275 case BDADDR_LE_RANDOM:
4276 /* Two most significant bits shall be set */
4277 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4278 return false;
4279 return true;
4280 }
4281
4282 return false;
4283 }
4284
4285 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4286 void *cp_data, u16 len)
4287 {
4288 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4289 u16 key_count, expected_len;
4290 int i, err;
4291
4292 BT_DBG("request for %s", hdev->name);
4293
4294 if (!lmp_le_capable(hdev))
4295 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4296 MGMT_STATUS_NOT_SUPPORTED);
4297
4298 key_count = __le16_to_cpu(cp->key_count);
4299
4300 expected_len = sizeof(*cp) + key_count *
4301 sizeof(struct mgmt_ltk_info);
4302 if (expected_len != len) {
4303 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4304 len, expected_len);
4305 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4306 MGMT_STATUS_INVALID_PARAMS);
4307 }
4308
4309 BT_DBG("%s key_count %u", hdev->name, key_count);
4310
4311 for (i = 0; i < key_count; i++) {
4312 struct mgmt_ltk_info *key = &cp->keys[i];
4313
4314 if (!ltk_is_valid(key))
4315 return cmd_status(sk, hdev->id,
4316 MGMT_OP_LOAD_LONG_TERM_KEYS,
4317 MGMT_STATUS_INVALID_PARAMS);
4318 }
4319
4320 hci_dev_lock(hdev);
4321
4322 hci_smp_ltks_clear(hdev);
4323
4324 for (i = 0; i < key_count; i++) {
4325 struct mgmt_ltk_info *key = &cp->keys[i];
4326 u8 type, addr_type;
4327
4328 if (key->addr.type == BDADDR_LE_PUBLIC)
4329 addr_type = ADDR_LE_DEV_PUBLIC;
4330 else
4331 addr_type = ADDR_LE_DEV_RANDOM;
4332
4333 if (key->master)
4334 type = HCI_SMP_LTK;
4335 else
4336 type = HCI_SMP_LTK_SLAVE;
4337
4338 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4339 key->type, key->val, key->enc_size, key->ediv,
4340 key->rand);
4341 }
4342
4343 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4344 NULL, 0);
4345
4346 hci_dev_unlock(hdev);
4347
4348 return err;
4349 }
4350
/* Dispatch table for mgmt commands, indexed directly by opcode.
 *
 * var_len entries accept payloads larger than data_len (they carry
 * trailing variable-length data); fixed entries require an exact size
 * match. Lookup and size validation happen in mgmt_control().
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	/* Reserved slot for an opcode without a handler here —
	 * presumably MGMT_OP_SET_PRIVACY (not yet implemented);
	 * TODO confirm against mgmt.h opcode numbering.
	 */
	{ },
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
};
4407
4408
/* Entry point for all mgmt commands arriving on the HCI control socket.
 *
 * Copies the message, validates header, controller index, opcode and
 * parameter length, then dispatches to the matching mgmt_handlers[]
 * entry. Returns msglen on success or a negative errno; protocol-level
 * failures are reported back to user space via cmd_status().
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must describe exactly the remaining
	 * payload.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, or claimed by a user
		 * channel, are not visible over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry
	 * a controller index; everything else requires one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match exactly; variable-length ones
	 * must at least contain the fixed part.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* On success the whole message counts as consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4501
4502 void mgmt_index_added(struct hci_dev *hdev)
4503 {
4504 if (hdev->dev_type != HCI_BREDR)
4505 return;
4506
4507 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4508 }
4509
4510 void mgmt_index_removed(struct hci_dev *hdev)
4511 {
4512 u8 status = MGMT_STATUS_INVALID_INDEX;
4513
4514 if (hdev->dev_type != HCI_BREDR)
4515 return;
4516
4517 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4518
4519 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4520 }
4521
4522 static void powered_complete(struct hci_dev *hdev, u8 status)
4523 {
4524 struct cmd_lookup match = { NULL, hdev };
4525
4526 BT_DBG("status 0x%02x", status);
4527
4528 hci_dev_lock(hdev);
4529
4530 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4531
4532 new_settings(hdev, match.sk);
4533
4534 hci_dev_unlock(hdev);
4535
4536 if (match.sk)
4537 sock_put(match.sk);
4538 }
4539
/* Build and run the HCI request that brings a freshly powered
 * controller in line with the current mgmt settings (SSP, LE host
 * support, static address, advertising, link security, scan mode,
 * class, name and EIR).
 *
 * Returns the hci_req_run() result; 0 means the request was started
 * and powered_complete() will be invoked.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-required setting with the controller. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4604
/* Notify mgmt about a controller power state change.
 *
 * On power-up, a successfully started powered_update_hci() request
 * defers all notification to powered_complete(); otherwise the pending
 * SET_POWERED commands are answered here. On power-down all pending
 * commands are failed with NOT_POWERED and, if needed, a zero class of
 * device is broadcast. Returns the new_settings() result.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	/* Nothing to do unless mgmt has claimed this controller. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* When the init request started successfully, the rest
		 * happens in powered_complete().
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Announce the class reset only when it was non-zero before. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4639
4640 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4641 {
4642 struct pending_cmd *cmd;
4643 u8 status;
4644
4645 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4646 if (!cmd)
4647 return;
4648
4649 if (err == -ERFKILL)
4650 status = MGMT_STATUS_RFKILLED;
4651 else
4652 status = MGMT_STATUS_FAILED;
4653
4654 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4655
4656 mgmt_pending_remove(cmd);
4657 }
4658
/* Called when the discoverable timer fires: drop the discoverable
 * state, restore page-scan-only mode on BR/EDR, refresh class and
 * advertising data, and broadcast the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	/* Drop inquiry scan but keep page scan enabled. */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* The class of device and advertising flags both encode the
	 * (limited) discoverable state and need refreshing.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4689
4690 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4691 {
4692 bool changed;
4693
4694 /* Nothing needed here if there's a pending command since that
4695 * commands request completion callback takes care of everything
4696 * necessary.
4697 */
4698 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4699 return;
4700
4701 if (discoverable) {
4702 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4703 } else {
4704 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4705 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4706 }
4707
4708 if (changed) {
4709 struct hci_request req;
4710
4711 /* In case this change in discoverable was triggered by
4712 * a disabling of connectable there could be a need to
4713 * update the advertising flags.
4714 */
4715 hci_req_init(&req, hdev);
4716 update_adv_data(&req);
4717 hci_req_run(&req, NULL);
4718
4719 new_settings(hdev, NULL);
4720 }
4721 }
4722
4723 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4724 {
4725 bool changed;
4726
4727 /* Nothing needed here if there's a pending command since that
4728 * commands request completion callback takes care of everything
4729 * necessary.
4730 */
4731 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4732 return;
4733
4734 if (connectable)
4735 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4736 else
4737 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4738
4739 if (changed)
4740 new_settings(hdev, NULL);
4741 }
4742
4743 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4744 {
4745 u8 mgmt_err = mgmt_status(status);
4746
4747 if (scan & SCAN_PAGE)
4748 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4749 cmd_status_rsp, &mgmt_err);
4750
4751 if (scan & SCAN_INQUIRY)
4752 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4753 cmd_status_rsp, &mgmt_err);
4754 }
4755
4756 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4757 bool persistent)
4758 {
4759 struct mgmt_ev_new_link_key ev;
4760
4761 memset(&ev, 0, sizeof(ev));
4762
4763 ev.store_hint = persistent;
4764 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4765 ev.key.addr.type = BDADDR_BREDR;
4766 ev.key.type = key->type;
4767 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4768 ev.key.pin_len = key->pin_len;
4769
4770 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4771 }
4772
/* Forward a newly distributed SMP long term key to user space,
 * together with a hint on whether it should be stored persistently.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 *
	 * The 0xc0 test checks the two most significant address bits:
	 * only static random addresses have both set (per the
	 * Bluetooth Core Specification), so anything else under
	 * ADDR_LE_DEV_RANDOM is a resolvable/non-resolvable address.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* Flag master (initiator) keys so user space can restore the role */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4810
/* Forward a newly distributed identity resolving key to user space. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on the next boot of the system.
	 * More identity resolving keys means more time during scanning
	 * is needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
4840
4841 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4842 u8 data_len)
4843 {
4844 eir[eir_len++] = sizeof(type) + data_len;
4845 eir[eir_len++] = type;
4846 memcpy(&eir[eir_len], data, data_len);
4847 eir_len += data_len;
4848
4849 return eir_len;
4850 }
4851
/* Emit a Device Connected event carrying the remote name and class of
 * device (when known) as EIR-formatted trailing data.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Fixed event header plus variable EIR data; 512 bytes suffices
	 * since name_len is a u8 (max 255) and the CoD field adds 5.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* An all-zero class of device means "unknown" - don't report it */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4878
4879 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4880 {
4881 struct mgmt_cp_disconnect *cp = cmd->param;
4882 struct sock **sk = data;
4883 struct mgmt_rp_disconnect rp;
4884
4885 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4886 rp.addr.type = cp->addr.type;
4887
4888 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4889 sizeof(rp));
4890
4891 *sk = cmd->sk;
4892 sock_hold(*sk);
4893
4894 mgmt_pending_remove(cmd);
4895 }
4896
4897 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4898 {
4899 struct hci_dev *hdev = data;
4900 struct mgmt_cp_unpair_device *cp = cmd->param;
4901 struct mgmt_rp_unpair_device rp;
4902
4903 memset(&rp, 0, sizeof(rp));
4904 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4905 rp.addr.type = cp->addr.type;
4906
4907 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4908
4909 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4910
4911 mgmt_pending_remove(cmd);
4912 }
4913
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands that were waiting on this disconnection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only ACL and LE links are exposed through the mgmt interface */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp answers the pending command and stores its
	 * socket in sk with a reference held.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Skip the socket that issued Disconnect - it already got a
	 * command response above.
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	/* Unpair Device waits for the link to go down before completing */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
4937
/* Handle a failed HCI Disconnect: complete the matching pending
 * Disconnect command (if any) with the translated error status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	/* Unpair Device was waiting for the disconnection; let it
	 * complete regardless of the failure.
	 */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only answer the command if it targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4969
4970 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4971 u8 addr_type, u8 status)
4972 {
4973 struct mgmt_ev_connect_failed ev;
4974
4975 bacpy(&ev.addr.bdaddr, bdaddr);
4976 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4977 ev.status = mgmt_status(status);
4978
4979 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4980 }
4981
4982 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4983 {
4984 struct mgmt_ev_pin_code_request ev;
4985
4986 bacpy(&ev.addr.bdaddr, bdaddr);
4987 ev.addr.type = BDADDR_BREDR;
4988 ev.secure = secure;
4989
4990 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4991 }
4992
4993 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4994 u8 status)
4995 {
4996 struct pending_cmd *cmd;
4997 struct mgmt_rp_pin_code_reply rp;
4998
4999 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5000 if (!cmd)
5001 return;
5002
5003 bacpy(&rp.addr.bdaddr, bdaddr);
5004 rp.addr.type = BDADDR_BREDR;
5005
5006 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5007 mgmt_status(status), &rp, sizeof(rp));
5008
5009 mgmt_pending_remove(cmd);
5010 }
5011
5012 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5013 u8 status)
5014 {
5015 struct pending_cmd *cmd;
5016 struct mgmt_rp_pin_code_reply rp;
5017
5018 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5019 if (!cmd)
5020 return;
5021
5022 bacpy(&rp.addr.bdaddr, bdaddr);
5023 rp.addr.type = BDADDR_BREDR;
5024
5025 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5026 mgmt_status(status), &rp, sizeof(rp));
5027
5028 mgmt_pending_remove(cmd);
5029 }
5030
5031 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5032 u8 link_type, u8 addr_type, __le32 value,
5033 u8 confirm_hint)
5034 {
5035 struct mgmt_ev_user_confirm_request ev;
5036
5037 BT_DBG("%s", hdev->name);
5038
5039 bacpy(&ev.addr.bdaddr, bdaddr);
5040 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5041 ev.confirm_hint = confirm_hint;
5042 ev.value = value;
5043
5044 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5045 NULL);
5046 }
5047
5048 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5049 u8 link_type, u8 addr_type)
5050 {
5051 struct mgmt_ev_user_passkey_request ev;
5052
5053 BT_DBG("%s", hdev->name);
5054
5055 bacpy(&ev.addr.bdaddr, bdaddr);
5056 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5057
5058 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5059 NULL);
5060 }
5061
/* Common helper for the four user-pairing reply completions: look up
 * the pending command for opcode, answer it with the translated HCI
 * status, and remove it. Returns -ENOENT if no command was pending,
 * otherwise the cmd_complete() result.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5083
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5090
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5098
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5105
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5113
5114 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5115 u8 link_type, u8 addr_type, u32 passkey,
5116 u8 entered)
5117 {
5118 struct mgmt_ev_passkey_notify ev;
5119
5120 BT_DBG("%s", hdev->name);
5121
5122 bacpy(&ev.addr.bdaddr, bdaddr);
5123 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5124 ev.passkey = __cpu_to_le32(passkey);
5125 ev.entered = entered;
5126
5127 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5128 }
5129
5130 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5131 u8 addr_type, u8 status)
5132 {
5133 struct mgmt_ev_auth_failed ev;
5134
5135 bacpy(&ev.addr.bdaddr, bdaddr);
5136 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5137 ev.status = mgmt_status(status);
5138
5139 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5140 }
5141
/* Handle completion of HCI Write Authentication Enable: sync the
 * HCI_LINK_SECURITY setting with the controller state and answer any
 * pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail all pending commands with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH flag into the mgmt setting */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp stored the first command socket with a reference */
	if (match.sk)
		sock_put(match.sk);
}
5170
5171 static void clear_eir(struct hci_request *req)
5172 {
5173 struct hci_dev *hdev = req->hdev;
5174 struct hci_cp_write_eir cp;
5175
5176 if (!lmp_ext_inq_capable(hdev))
5177 return;
5178
5179 memset(hdev->eir, 0, sizeof(hdev->eir));
5180
5181 memset(&cp, 0, sizeof(cp));
5182
5183 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5184 }
5185
/* Handle completion of HCI Write Simple Pairing Mode: sync the SSP and
 * High Speed settings, answer pending Set SSP commands, and update or
 * clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls back the flag; HS depends on SSP
		 * so it gets cleared as well.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP implies disabling High Speed. Even if SSP
		 * was already off, a change in the HS flag alone still
		 * warrants a New Settings event.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR data is only maintained while SSP is enabled */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5234
/* Handle completion of enabling/disabling Secure Connections: sync the
 * SC flags and answer pending Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back a failed enable; SC-only mode cannot stand
		 * without SC itself.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5271
5272 static void sk_lookup(struct pending_cmd *cmd, void *data)
5273 {
5274 struct cmd_lookup *match = data;
5275
5276 if (match->sk == NULL) {
5277 match->sk = cmd->sk;
5278 sock_hold(match->sk);
5279 }
5280 }
5281
/* Handle completion of a class-of-device update triggered by Set Dev
 * Class, Add UUID or Remove UUID, emitting a Class Of Dev Changed event
 * on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Grab the socket of whichever command initiated the change so
	 * it can be skipped when broadcasting the event.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5298
/* Handle completion of a local name change and emit a Local Name
 * Changed event unless the change is part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command means the change came
		 * from the controller side - adopt the new name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket - it gets a command response instead */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5325
/* Handle completion of reading local out-of-band pairing data and
 * answer the pending Read Local OOB Data command. With Secure
 * Connections enabled and 256-bit data available, the extended
 * response carrying both the 192-bit and 256-bit values is used.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended response: both P-192 and P-256 values */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy response: P-192 values only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5372
/* Emit a Device Found event for a discovery result, resolving RPAs to
 * identity addresses when an IRK is known and appending class-of-device
 * to the EIR data if not already present.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only reported while a discovery is in progress */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address instead of an RPA when possible */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append class of device unless the EIR data already carries it */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5418
5419 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5420 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5421 {
5422 struct mgmt_ev_device_found *ev;
5423 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5424 u16 eir_len;
5425
5426 ev = (struct mgmt_ev_device_found *) buf;
5427
5428 memset(buf, 0, sizeof(buf));
5429
5430 bacpy(&ev->addr.bdaddr, bdaddr);
5431 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5432 ev->rssi = rssi;
5433
5434 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5435 name_len);
5436
5437 ev->eir_len = cpu_to_le16(eir_len);
5438
5439 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5440 }
5441
5442 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5443 {
5444 struct mgmt_ev_discovering ev;
5445 struct pending_cmd *cmd;
5446
5447 BT_DBG("%s discovering %u", hdev->name, discovering);
5448
5449 if (discovering)
5450 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5451 else
5452 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5453
5454 if (cmd != NULL) {
5455 u8 type = hdev->discovery.type;
5456
5457 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5458 sizeof(type));
5459 mgmt_pending_remove(cmd);
5460 }
5461
5462 memset(&ev, 0, sizeof(ev));
5463 ev.type = hdev->discovery.type;
5464 ev.discovering = discovering;
5465
5466 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5467 }
5468
5469 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5470 {
5471 struct pending_cmd *cmd;
5472 struct mgmt_ev_device_blocked ev;
5473
5474 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5475
5476 bacpy(&ev.addr.bdaddr, bdaddr);
5477 ev.addr.type = type;
5478
5479 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5480 cmd ? cmd->sk : NULL);
5481 }
5482
5483 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5484 {
5485 struct pending_cmd *cmd;
5486 struct mgmt_ev_device_unblocked ev;
5487
5488 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5489
5490 bacpy(&ev.addr.bdaddr, bdaddr);
5491 ev.addr.type = type;
5492
5493 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5494 cmd ? cmd->sk : NULL);
5495 }
5496
/* Completion callback for the advertising re-enable request issued by
 * mgmt_reenable_advertising().
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
5507
/* Re-enable LE advertising after the last LE connection went away,
 * provided the Advertising setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising stays off while any LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}