]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Use hci_update_random_address() for enabling advertising
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
/* Version/revision of the management interface API, reported to
 * user space via the Read Management Version Information command.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	5

/* All mgmt opcodes this kernel implements; reported verbatim by the
 * Read Management Supported Commands command (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_LOAD_IRKS,
};

86
/* All mgmt events this kernel may emit; reported alongside the
 * command list by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};

111
/* Lifetime of the service cache before it is flushed (see
 * service_cache_off()).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller counts as "powered" for mgmt purposes only when it is
 * up AND not merely auto-powered for initial setup (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* Book-keeping for a mgmt command whose HCI work is still in flight */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode of the pending command */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'ed copy of the cmd parameters */
	struct sock *sk;	/* originating socket; a ref is held */
	void *user_data;	/* command-specific context */
};

125
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status byte.  Statuses beyond the table map to MGMT_STATUS_FAILED
 * (see mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

190
191 static u8 mgmt_status(u8 hci_status)
192 {
193 if (hci_status < ARRAY_SIZE(mgmt_status_table))
194 return mgmt_status_table[hci_status];
195
196 return MGMT_STATUS_FAILED;
197 }
198
/* Send a Command Status event to the socket that issued a mgmt
 * command.  Used when a command fails before any reply parameters
 * exist.
 *
 * Returns 0 on success or a negative errno if the skb could not be
 * allocated or queued.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* mgmt header: event opcode, controller index, payload length */
	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	/* Payload: status plus the opcode of the failed command */
	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On queueing failure the skb is still ours, so free it */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
228
/* Send a Command Complete event with @rp_len bytes of reply
 * parameters (@rp may be NULL when @rp_len is 0) back to the socket
 * that issued a mgmt command.
 *
 * Returns 0 on success or a negative errno if the skb could not be
 * allocated or queued.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* mgmt header: event opcode, controller index, payload length */
	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	/* Payload: completed opcode, status and the reply parameters */
	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On queueing failure the skb is still ours, so free it */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
262
263 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 u16 data_len)
265 {
266 struct mgmt_rp_read_version rp;
267
268 BT_DBG("sock %p", sk);
269
270 rp.version = MGMT_VERSION;
271 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
272
273 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
274 sizeof(rp));
275 }
276
277 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 u16 data_len)
279 {
280 struct mgmt_rp_read_commands *rp;
281 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
282 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 __le16 *opcode;
284 size_t rp_size;
285 int i, err;
286
287 BT_DBG("sock %p", sk);
288
289 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
290
291 rp = kmalloc(rp_size, GFP_KERNEL);
292 if (!rp)
293 return -ENOMEM;
294
295 rp->num_commands = __constant_cpu_to_le16(num_commands);
296 rp->num_events = __constant_cpu_to_le16(num_events);
297
298 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
299 put_unaligned_le16(mgmt_commands[i], opcode);
300
301 for (i = 0; i < num_events; i++, opcode++)
302 put_unaligned_le16(mgmt_events[i], opcode);
303
304 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
305 rp_size);
306 kfree(rp);
307
308 return err;
309 }
310
/* Handle Read Controller Index List: reply with the ids of all BR/EDR
 * controllers that are ready for use.  @hdev, @data and @data_len are
 * unused; the command is index-independent.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count every BR/EDR controller.  This does not
	 * apply the SETUP/USER_CHANNEL filters of the second pass, so
	 * the allocation below is an upper bound; the final length is
	 * recomputed from the real count before replying.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, skipping controllers that are
	 * still in setup or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0,
			   rp, rp_len);

	kfree(rp);

	return err;
}
363
364 static u32 get_supported_settings(struct hci_dev *hdev)
365 {
366 u32 settings = 0;
367
368 settings |= MGMT_SETTING_POWERED;
369 settings |= MGMT_SETTING_PAIRABLE;
370 settings |= MGMT_SETTING_DEBUG_KEYS;
371
372 if (lmp_bredr_capable(hdev)) {
373 settings |= MGMT_SETTING_CONNECTABLE;
374 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
375 settings |= MGMT_SETTING_FAST_CONNECTABLE;
376 settings |= MGMT_SETTING_DISCOVERABLE;
377 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY;
379
380 if (lmp_ssp_capable(hdev)) {
381 settings |= MGMT_SETTING_SSP;
382 settings |= MGMT_SETTING_HS;
383 }
384
385 if (lmp_sc_capable(hdev) ||
386 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
387 settings |= MGMT_SETTING_SECURE_CONN;
388 }
389
390 if (lmp_le_capable(hdev)) {
391 settings |= MGMT_SETTING_LE;
392 settings |= MGMT_SETTING_ADVERTISING;
393 settings |= MGMT_SETTING_PRIVACY;
394 }
395
396 return settings;
397 }
398
399 static u32 get_current_settings(struct hci_dev *hdev)
400 {
401 u32 settings = 0;
402
403 if (hdev_is_powered(hdev))
404 settings |= MGMT_SETTING_POWERED;
405
406 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_CONNECTABLE;
408
409 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_FAST_CONNECTABLE;
411
412 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
413 settings |= MGMT_SETTING_DISCOVERABLE;
414
415 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
416 settings |= MGMT_SETTING_PAIRABLE;
417
418 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
419 settings |= MGMT_SETTING_BREDR;
420
421 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_LE;
423
424 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
425 settings |= MGMT_SETTING_LINK_SECURITY;
426
427 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
428 settings |= MGMT_SETTING_SSP;
429
430 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
431 settings |= MGMT_SETTING_HS;
432
433 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
434 settings |= MGMT_SETTING_ADVERTISING;
435
436 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
437 settings |= MGMT_SETTING_SECURE_CONN;
438
439 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
440 settings |= MGMT_SETTING_DEBUG_KEYS;
441
442 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
443 settings |= MGMT_SETTING_PRIVACY;
444
445 return settings;
446 }
447
/* PnP Information service class: excluded from EIR, it is conveyed
 * via the dedicated EIR_DEVICE_ID field instead (see create_eir()).
 */
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR structure listing the registered 16-bit service UUIDs
 * to @data, using at most @len bytes.  The structure is only started
 * once a qualifying UUID is found; if not everything fits, the type
 * is downgraded from "complete" to "some".  Returns a pointer just
 * past the written data (== @data if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least header (2) + one 16-bit UUID (2) */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
491
/* Append an EIR structure listing the registered 32-bit service UUIDs
 * to @data, using at most @len bytes.  Same lazy-header and
 * complete/some downgrade scheme as create_uuid16_list().  Returns a
 * pointer just past the written data.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least header (2) + one 32-bit UUID (4) */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives at bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
524
/* Append an EIR structure listing the registered 128-bit service
 * UUIDs to @data, using at most @len bytes.  Same lazy-header and
 * complete/some downgrade scheme as create_uuid16_list().  Returns a
 * pointer just past the written data.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least header (2) + one 128-bit UUID (16) */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the EIR header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
557
558 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
559 {
560 struct pending_cmd *cmd;
561
562 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
563 if (cmd->opcode == opcode)
564 return cmd;
565 }
566
567 return NULL;
568 }
569
570 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
571 {
572 u8 ad_len = 0;
573 size_t name_len;
574
575 name_len = strlen(hdev->dev_name);
576 if (name_len > 0) {
577 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
578
579 if (name_len > max_len) {
580 name_len = max_len;
581 ptr[1] = EIR_NAME_SHORT;
582 } else
583 ptr[1] = EIR_NAME_COMPLETE;
584
585 ptr[0] = name_len + 1;
586
587 memcpy(ptr + 2, hdev->dev_name, name_len);
588
589 ad_len += (name_len + 2);
590 ptr += (name_len + 2);
591 }
592
593 return ad_len;
594 }
595
/* Queue an HCI command updating the LE scan response data, but only
 * if LE is enabled and the data actually changed since the last
 * update (the controller copy is cached in hdev->scan_rsp_data).
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
620
621 static u8 get_adv_discov_flags(struct hci_dev *hdev)
622 {
623 struct pending_cmd *cmd;
624
625 /* If there's a pending mgmt command the flags will not yet have
626 * their final values, so check for this first.
627 */
628 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
629 if (cmd) {
630 struct mgmt_mode *cp = cmd->param;
631 if (cp->val == 0x01)
632 return LE_AD_GENERAL;
633 else if (cp->val == 0x02)
634 return LE_AD_LIMITED;
635 } else {
636 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
637 return LE_AD_LIMITED;
638 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
639 return LE_AD_GENERAL;
640 }
641
642 return 0;
643 }
644
645 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
646 {
647 u8 ad_len = 0, flags = 0;
648
649 flags |= get_adv_discov_flags(hdev);
650
651 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
652 flags |= LE_AD_NO_BREDR;
653
654 if (flags) {
655 BT_DBG("adv flags 0x%02x", flags);
656
657 ptr[0] = 2;
658 ptr[1] = EIR_FLAGS;
659 ptr[2] = flags;
660
661 ad_len += 3;
662 ptr += 3;
663 }
664
665 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
666 ptr[0] = 2;
667 ptr[1] = EIR_TX_POWER;
668 ptr[2] = (u8) hdev->adv_tx_power;
669
670 ad_len += 3;
671 ptr += 3;
672 }
673
674 return ad_len;
675 }
676
/* Queue an HCI command updating the LE advertising data, but only if
 * LE is enabled and the data actually changed since the last update
 * (the controller copy is cached in hdev->adv_data).
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
701
/* Build the Extended Inquiry Response payload into @data: local name
 * (truncated to 48 bytes), TX power, device ID and the three UUID
 * lists.  @data must have room for HCI_MAX_EIR_LENGTH bytes.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type: complete or shortened local name */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte + name) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space is left */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
749
/* Queue a Write Extended Inquiry Response command, provided the
 * controller is powered, supports EIR, has SSP enabled and the
 * service cache is not active.  The command is skipped when the new
 * EIR equals the cached copy in hdev->eir.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Updates are deferred while the service cache is in use */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
778
779 static u8 get_service_classes(struct hci_dev *hdev)
780 {
781 struct bt_uuid *uuid;
782 u8 val = 0;
783
784 list_for_each_entry(uuid, &hdev->uuids, list)
785 val |= uuid->svc_hint;
786
787 return val;
788 }
789
/* Queue a Write Class of Device command built from the major/minor
 * class and the registered service hints.  Skipped while unpowered,
 * while BR/EDR is disabled, while the service cache is active, or
 * when the class is unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Updates are deferred while the service cache is in use */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the major class */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the command if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
818
819 static u8 get_adv_type(struct hci_dev *hdev)
820 {
821 struct pending_cmd *cmd;
822 bool connectable;
823
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
826 */
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
828 if (cmd) {
829 struct mgmt_mode *cp = cmd->param;
830 connectable = !!cp->val;
831 } else {
832 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
833 }
834
835 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
836 }
837
/* Queue the HCI commands that turn on LE advertising: parameter setup
 * followed by the enable itself.
 *
 * hci_update_random_address() selects the own address type and, when
 * needed, queues programming of a new random address (including RPA
 * regeneration — see rpa_expired()); if it fails, no advertising
 * commands are queued at all.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;

	memset(&cp, 0, sizeof(cp));

	if (hci_update_random_address(req, &own_addr_type) < 0)
		return;

	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
859
860 static void disable_advertising(struct hci_request *req)
861 {
862 u8 enable = 0x00;
863
864 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
865 }
866
/* Delayed work run when the service cache expires: clear the cache
 * flag and push the (previously deferred) EIR and class-of-device
 * updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache was not active */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the queued commands outside the device lock */
	hci_req_run(&req, NULL);
}
887
/* Delayed work run when the Resolvable Private Address times out:
 * mark it expired and, if we are currently advertising with no LE
 * connections, restart advertising so a fresh RPA gets used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Only restart when advertising and no LE link is active */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
913
/* One-time per-controller mgmt initialization, triggered by the first
 * mgmt command addressing it.  Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
929
/* Handle Read Controller Information: reply with address, version,
 * manufacturer, supported and current settings, class of device and
 * names.  @data and @data_len are unused (no command parameters).
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Take the device lock so the snapshot below is consistent */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
959
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the entry.
 * The caller must already have unlinked it from the list.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
966
/* Allocate a pending command entry for @opcode on @hdev, copy @len
 * bytes of @data as its parameters, take a reference on @sk and link
 * the entry into hdev->mgmt_pending.
 *
 * Returns the new entry or NULL on allocation failure.  Note that
 * cmd->param stays uninitialized when @data is NULL but @len > 0.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	/* Hold the socket until the command completes */
	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
996
997 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
998 void (*cb)(struct pending_cmd *cmd,
999 void *data),
1000 void *data)
1001 {
1002 struct pending_cmd *cmd, *tmp;
1003
1004 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1005 if (opcode > 0 && cmd->opcode != opcode)
1006 continue;
1007
1008 cb(cmd, data);
1009 }
1010 }
1011
/* Unlink a pending command from its list and release it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1017
/* Reply to a settings-changing command with a Command Complete event
 * carrying the controller's current settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1025
/* Handle Set Powered: power the controller up or down via the request
 * workqueue.  The pending-command entry is completed asynchronously
 * once the power change has taken effect.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the device is in auto-off grace period, cancel the
	 * scheduled power-off; powering "on" then just means keeping
	 * the already-up device and reporting it as powered.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just echo the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The actual power change happens on the request workqueue */
	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1080
/* Broadcast a mgmt event to all control sockets except @skip_sk.
 * @hdev may be NULL for controller-independent events, in which case
 * the index is MGMT_INDEX_NONE; @data may be NULL when @data_len is 0.
 *
 * Returns 0 on success or -ENOMEM if the skb could not be allocated.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_control() clones per receiver; drop our copy */
	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1110
1111 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1112 {
1113 __le32 ev;
1114
1115 ev = cpu_to_le32(get_current_settings(hdev));
1116
1117 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1118 }
1119
/* Shared context for mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responded-to socket (ref held) */
	struct hci_dev *hdev;	/* controller being operated on */
	u8 mgmt_status;		/* status to report, where applicable */
};

/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, remember the first socket seen (taking a
 * reference the caller must release) and free the entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1141
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by @data and remove the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1149
1150 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1151 {
1152 if (!lmp_bredr_capable(hdev))
1153 return MGMT_STATUS_NOT_SUPPORTED;
1154 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1155 return MGMT_STATUS_REJECTED;
1156 else
1157 return MGMT_STATUS_SUCCESS;
1158 }
1159
1160 static u8 mgmt_le_support(struct hci_dev *hdev)
1161 {
1162 if (!lmp_le_capable(hdev))
1163 return MGMT_STATUS_NOT_SUPPORTED;
1164 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1165 return MGMT_STATUS_REJECTED;
1166 else
1167 return MGMT_STATUS_SUCCESS;
1168 }
1169
/* HCI request completion handler for Set Discoverable: update the
 * flags, arm the discoverable timeout, answer the pending command and
 * refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag on failure */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Schedule automatic discoverable-off if a timeout
		 * was requested (timeout is kept in seconds).
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	/* Broadcast only if the flag actually flipped */
	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1226
/* Handle the Set Discoverable mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the auto-off time in seconds (required
 * for limited mode, forbidden when disabling). Depending on device
 * state this either only updates stored flags, or issues the needed
 * HCI commands and finishes in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable requires at least one enabled transport */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the device is powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one discoverable/connectable change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only the stored flag is changed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1391
1392 static void write_fast_connectable(struct hci_request *req, bool enable)
1393 {
1394 struct hci_dev *hdev = req->hdev;
1395 struct hci_cp_write_page_scan_activity acp;
1396 u8 type;
1397
1398 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1399 return;
1400
1401 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1402 return;
1403
1404 if (enable) {
1405 type = PAGE_SCAN_TYPE_INTERLACED;
1406
1407 /* 160 msec page scan interval */
1408 acp.interval = __constant_cpu_to_le16(0x0100);
1409 } else {
1410 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1411
1412 /* default 1.28 sec page scan */
1413 acp.interval = __constant_cpu_to_le16(0x0800);
1414 }
1415
1416 acp.window = __constant_cpu_to_le16(0x0012);
1417
1418 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1419 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1420 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1421 sizeof(acp), &acp);
1422
1423 if (hdev->page_scan_type != type)
1424 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1425 }
1426
1427 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1428 {
1429 struct pending_cmd *cmd;
1430 struct mgmt_mode *cp;
1431 bool changed;
1432
1433 BT_DBG("status 0x%02x", status);
1434
1435 hci_dev_lock(hdev);
1436
1437 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1438 if (!cmd)
1439 goto unlock;
1440
1441 if (status) {
1442 u8 mgmt_err = mgmt_status(status);
1443 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1444 goto remove_cmd;
1445 }
1446
1447 cp = cmd->param;
1448 if (cp->val)
1449 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1450 else
1451 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1452
1453 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1454
1455 if (changed)
1456 new_settings(hdev, cmd->sk);
1457
1458 remove_cmd:
1459 mgmt_pending_remove(cmd);
1460
1461 unlock:
1462 hci_dev_unlock(hdev);
1463 }
1464
1465 static int set_connectable_update_settings(struct hci_dev *hdev,
1466 struct sock *sk, u8 val)
1467 {
1468 bool changed = false;
1469 int err;
1470
1471 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1472 changed = true;
1473
1474 if (val) {
1475 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1476 } else {
1477 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1479 }
1480
1481 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1482 if (err < 0)
1483 return err;
1484
1485 if (changed)
1486 return new_settings(hdev, sk);
1487
1488 return 0;
1489 }
1490
/* Handle the Set Connectable mgmt command.
 *
 * cp->val: 0x00 = non-connectable, 0x01 = connectable. When powered,
 * this programs page scanning (BR/EDR) and/or refreshes advertising
 * (LE) and completes in set_connectable_complete(); when powered off,
 * only the stored setting is changed.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires at least one enabled transport */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only update the stored settings */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one discoverable/connectable change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Turning off page scan also ends any pending
			 * discoverable period.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so that it reflects the new connectable
	 * state, but only while no LE connection is up.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means no HCI command was queued; fall back
		 * to a plain settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1585
1586 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1587 u16 len)
1588 {
1589 struct mgmt_mode *cp = data;
1590 bool changed;
1591 int err;
1592
1593 BT_DBG("request for %s", hdev->name);
1594
1595 if (cp->val != 0x00 && cp->val != 0x01)
1596 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1597 MGMT_STATUS_INVALID_PARAMS);
1598
1599 hci_dev_lock(hdev);
1600
1601 if (cp->val)
1602 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1603 else
1604 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1605
1606 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1607 if (err < 0)
1608 goto unlock;
1609
1610 if (changed)
1611 err = new_settings(hdev, sk);
1612
1613 unlock:
1614 hci_dev_unlock(hdev);
1615 return err;
1616 }
1617
/* Handle the Set Link Security mgmt command (legacy authentication).
 * When powered, maps to the Write Authentication Enable HCI command;
 * the pending command is completed elsewhere once the controller
 * responds. When powered off only the stored flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the stored flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: acknowledge
	 * without sending any HCI command.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1687
/* Handle the Set Secure Simple Pairing mgmt command. When powered,
 * maps to the Write Simple Pairing Mode HCI command; when powered off,
 * only the stored flags change. Disabling SSP also clears High Speed
 * support, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: update the stored flags only. "changed" must
	 * become true if either the SSP or the HS flag flipped.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: acknowledge without HCI */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1765
1766 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1767 {
1768 struct mgmt_mode *cp = data;
1769 bool changed;
1770 u8 status;
1771 int err;
1772
1773 BT_DBG("request for %s", hdev->name);
1774
1775 status = mgmt_bredr_support(hdev);
1776 if (status)
1777 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1778
1779 if (!lmp_ssp_capable(hdev))
1780 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1781 MGMT_STATUS_NOT_SUPPORTED);
1782
1783 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1784 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1785 MGMT_STATUS_REJECTED);
1786
1787 if (cp->val != 0x00 && cp->val != 0x01)
1788 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1789 MGMT_STATUS_INVALID_PARAMS);
1790
1791 hci_dev_lock(hdev);
1792
1793 if (cp->val) {
1794 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1795 } else {
1796 if (hdev_is_powered(hdev)) {
1797 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1798 MGMT_STATUS_REJECTED);
1799 goto unlock;
1800 }
1801
1802 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1803 }
1804
1805 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1806 if (err < 0)
1807 goto unlock;
1808
1809 if (changed)
1810 err = new_settings(hdev, sk);
1811
1812 unlock:
1813 hci_dev_unlock(hdev);
1814 return err;
1815 }
1816
/* Request-complete handler for the Write LE Host Supported transaction
 * started by set_le(). Responds to all pending Set LE commands and, on
 * success with LE enabled, refreshes advertising and scan response
 * data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* settings_rsp stores (and holds a reference to) the first
	 * responder socket in match.sk so new_settings() skips it.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1854
/* Handle the Set Low Energy mgmt command.
 *
 * When powered and the host LE state actually needs to change, this
 * issues a Write LE Host Supported HCI command (disabling advertising
 * first when turning LE off) and completes in le_enable_complete();
 * otherwise only the stored flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or controller already in the requested host LE
	 * state: only the flags need updating. Disabling LE also
	 * clears the advertising flag.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before LE support is switched off
		 * in the controller.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1943
1944 /* This is a helper function to test for pending mgmt commands that can
1945 * cause CoD or EIR HCI commands. We can only allow one such pending
1946 * mgmt command at a time since otherwise we cannot easily track what
1947 * the current values are, will be, and based on that calculate if a new
1948 * HCI command needs to be sent and if yes with what value.
1949 */
1950 static bool pending_eir_or_class(struct hci_dev *hdev)
1951 {
1952 struct pending_cmd *cmd;
1953
1954 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1955 switch (cmd->opcode) {
1956 case MGMT_OP_ADD_UUID:
1957 case MGMT_OP_REMOVE_UUID:
1958 case MGMT_OP_SET_DEV_CLASS:
1959 case MGMT_OP_SET_POWERED:
1960 return true;
1961 }
1962 }
1963
1964 return false;
1965 }
1966
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; bytes 12-15 (all zero here) are where a
 * shortened 16/32-bit UUID value lives (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1971
1972 static u8 get_uuid_size(const u8 *uuid)
1973 {
1974 u32 val;
1975
1976 if (memcmp(uuid, bluetooth_base_uuid, 12))
1977 return 128;
1978
1979 val = get_unaligned_le32(&uuid[12]);
1980 if (val > 0xffff)
1981 return 32;
1982
1983 return 16;
1984 }
1985
1986 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1987 {
1988 struct pending_cmd *cmd;
1989
1990 hci_dev_lock(hdev);
1991
1992 cmd = mgmt_pending_find(mgmt_op, hdev);
1993 if (!cmd)
1994 goto unlock;
1995
1996 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1997 hdev->dev_class, 3);
1998
1999 mgmt_pending_remove(cmd);
2000
2001 unlock:
2002 hci_dev_unlock(hdev);
2003 }
2004
/* hci_request completion hook for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2011
/* Handle the Add UUID mgmt command: record the UUID in hdev->uuids and
 * refresh the class of device and EIR data to advertise it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands were queued (nothing to
		 * change on the controller), so complete immediately.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2069
2070 static bool enable_service_cache(struct hci_dev *hdev)
2071 {
2072 if (!hdev_is_powered(hdev))
2073 return false;
2074
2075 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2076 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2077 CACHE_TIMEOUT);
2078 return true;
2079 }
2080
2081 return false;
2082 }
2083
/* hci_request completion hook for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2090
/* Handle the Remove UUID mgmt command. An all-zero UUID clears the
 * whole list; otherwise every matching entry is removed. Class of
 * device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard UUID: drop the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got newly armed, the controller
		 * update is deferred; answer immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2168
/* hci_request completion hook for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2175
/* Handle the Set Device Class mgmt command: store the new major/minor
 * class and, when powered, push the updated class (and EIR, if the
 * service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two lowest minor bits and three highest major bits must
	 * be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just acknowledge with the stored class */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while cancelling so that a running
		 * service_cache work item can take it and finish.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2246
2247 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2248 u16 len)
2249 {
2250 struct mgmt_cp_load_link_keys *cp = data;
2251 u16 key_count, expected_len;
2252 bool changed;
2253 int i;
2254
2255 BT_DBG("request for %s", hdev->name);
2256
2257 if (!lmp_bredr_capable(hdev))
2258 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2259 MGMT_STATUS_NOT_SUPPORTED);
2260
2261 key_count = __le16_to_cpu(cp->key_count);
2262
2263 expected_len = sizeof(*cp) + key_count *
2264 sizeof(struct mgmt_link_key_info);
2265 if (expected_len != len) {
2266 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2267 len, expected_len);
2268 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2269 MGMT_STATUS_INVALID_PARAMS);
2270 }
2271
2272 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2273 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2274 MGMT_STATUS_INVALID_PARAMS);
2275
2276 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2277 key_count);
2278
2279 for (i = 0; i < key_count; i++) {
2280 struct mgmt_link_key_info *key = &cp->keys[i];
2281
2282 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2283 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2284 MGMT_STATUS_INVALID_PARAMS);
2285 }
2286
2287 hci_dev_lock(hdev);
2288
2289 hci_link_keys_clear(hdev);
2290
2291 if (cp->debug_keys)
2292 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2293 else
2294 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2295
2296 if (changed)
2297 new_settings(hdev, NULL);
2298
2299 for (i = 0; i < key_count; i++) {
2300 struct mgmt_link_key_info *key = &cp->keys[i];
2301
2302 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2303 key->type, key->pin_len);
2304 }
2305
2306 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2307
2308 hci_dev_unlock(hdev);
2309
2310 return 0;
2311 }
2312
/* Emit a Device Unpaired mgmt event for the given address, skipping
 * the socket that triggered the unpairing (it gets the command reply
 * instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2324
/* Handle the Unpair Device mgmt command: delete the stored keys (link
 * key for BR/EDR; IRK and LTK for LE) and optionally disconnect the
 * device. When a disconnect is issued the reply is deferred until the
 * disconnection completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key was found: the device was never paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: complete and notify right away */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2413
/* MGMT Disconnect command handler.
 *
 * Looks up the established ACL (BR/EDR) or LE connection matching the
 * given address/type and queues an HCI Disconnect with reason "remote
 * user terminated connection".  The mgmt reply is deferred: a pending
 * command is registered here and completed later from the HCI
 * disconnect event path.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Pre-fill the reply with the target address; every reply
	 * (success or error) echoes it back to the caller.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect may be outstanding per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED means no link is actually established */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2478
2479 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2480 {
2481 switch (link_type) {
2482 case LE_LINK:
2483 switch (addr_type) {
2484 case ADDR_LE_DEV_PUBLIC:
2485 return BDADDR_LE_PUBLIC;
2486
2487 default:
2488 /* Fallback to LE Random address type */
2489 return BDADDR_LE_RANDOM;
2490 }
2491
2492 default:
2493 /* Fallback to BR/EDR type */
2494 return BDADDR_BREDR;
2495 }
2496 }
2497
/* MGMT Get Connections command handler.
 *
 * Returns the list of currently established, mgmt-visible connections.
 * SCO/eSCO links are filtered out because mgmt only deals with ACL and
 * LE links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-connected links to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses.  A slot written for a SCO/eSCO
	 * link is simply reused by the next entry since i is not advanced
	 * for filtered links.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2555
2556 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2557 struct mgmt_cp_pin_code_neg_reply *cp)
2558 {
2559 struct pending_cmd *cmd;
2560 int err;
2561
2562 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2563 sizeof(*cp));
2564 if (!cmd)
2565 return -ENOMEM;
2566
2567 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2568 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2569 if (err < 0)
2570 mgmt_pending_remove(cmd);
2571
2572 return err;
2573 }
2574
/* MGMT PIN Code Reply command handler.
 *
 * Forwards a user-supplied PIN code to the controller for an ongoing
 * BR/EDR pairing.  If the pending security level is high, only a full
 * 16-byte PIN is acceptable; a shorter PIN is then turned into a
 * negative HCI reply and the mgmt command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only exists on BR/EDR, hence the ACL lookup */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI side; only report the
		 * mgmt error if the negative reply itself succeeded.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2634
2635 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2636 u16 len)
2637 {
2638 struct mgmt_cp_set_io_capability *cp = data;
2639
2640 BT_DBG("");
2641
2642 hci_dev_lock(hdev);
2643
2644 hdev->io_capability = cp->io_capability;
2645
2646 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2647 hdev->io_capability);
2648
2649 hci_dev_unlock(hdev);
2650
2651 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2652 0);
2653 }
2654
2655 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2656 {
2657 struct hci_dev *hdev = conn->hdev;
2658 struct pending_cmd *cmd;
2659
2660 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2661 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2662 continue;
2663
2664 if (cmd->user_data != conn)
2665 continue;
2666
2667 return cmd;
2668 }
2669
2670 return NULL;
2671 }
2672
2673 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2674 {
2675 struct mgmt_rp_pair_device rp;
2676 struct hci_conn *conn = cmd->user_data;
2677
2678 bacpy(&rp.addr.bdaddr, &conn->dst);
2679 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2680
2681 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2682 &rp, sizeof(rp));
2683
2684 /* So we don't get further callbacks for this connection */
2685 conn->connect_cfm_cb = NULL;
2686 conn->security_cfm_cb = NULL;
2687 conn->disconn_cfm_cb = NULL;
2688
2689 hci_conn_drop(conn);
2690
2691 mgmt_pending_remove(cmd);
2692 }
2693
2694 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2695 {
2696 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2697 struct pending_cmd *cmd;
2698
2699 cmd = find_pairing(conn);
2700 if (cmd)
2701 pairing_complete(cmd, status);
2702 }
2703
2704 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2705 {
2706 struct pending_cmd *cmd;
2707
2708 BT_DBG("status %u", status);
2709
2710 cmd = find_pairing(conn);
2711 if (!cmd)
2712 BT_DBG("Unable to find a pending command");
2713 else
2714 pairing_complete(cmd, mgmt_status(status));
2715 }
2716
2717 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2718 {
2719 struct pending_cmd *cmd;
2720
2721 BT_DBG("status %u", status);
2722
2723 if (!status)
2724 return;
2725
2726 cmd = find_pairing(conn);
2727 if (!cmd)
2728 BT_DBG("Unable to find a pending command");
2729 else
2730 pairing_complete(cmd, mgmt_status(status));
2731 }
2732
/* MGMT Pair Device command handler.
 *
 * Initiates a connection to the given address (ACL for BR/EDR, LE
 * otherwise) and wires up pairing callbacks on the connection.  The
 * command completes asynchronously via pairing_complete() unless the
 * link is already connected and secure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* All replies echo the target address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* IO capability 0x03 (NoInputNoOutput) cannot provide MITM
	 * protection; everything else requests MITM bonding.
	 */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A set connect callback means another pairing is already using
	 * this connection; drop the reference hci_connect() took.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2827
/* MGMT Cancel Pair Device command handler.
 *
 * Aborts an in-progress Pair Device operation for the given address by
 * completing the pending command with MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must match the requested address */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2869
/* Common helper for the user confirmation / passkey (negative) reply
 * commands.
 *
 * For LE connections the response is routed through SMP and answered
 * synchronously.  For BR/EDR it is forwarded to the controller via
 * @hci_op and completed later from the HCI event handler.  @passkey is
 * only meaningful when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2939
2940 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2941 void *data, u16 len)
2942 {
2943 struct mgmt_cp_pin_code_neg_reply *cp = data;
2944
2945 BT_DBG("");
2946
2947 return user_pairing_resp(sk, hdev, &cp->addr,
2948 MGMT_OP_PIN_CODE_NEG_REPLY,
2949 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2950 }
2951
2952 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2953 u16 len)
2954 {
2955 struct mgmt_cp_user_confirm_reply *cp = data;
2956
2957 BT_DBG("");
2958
2959 if (len != sizeof(*cp))
2960 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2961 MGMT_STATUS_INVALID_PARAMS);
2962
2963 return user_pairing_resp(sk, hdev, &cp->addr,
2964 MGMT_OP_USER_CONFIRM_REPLY,
2965 HCI_OP_USER_CONFIRM_REPLY, 0);
2966 }
2967
2968 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2969 void *data, u16 len)
2970 {
2971 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2972
2973 BT_DBG("");
2974
2975 return user_pairing_resp(sk, hdev, &cp->addr,
2976 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2977 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2978 }
2979
2980 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2981 u16 len)
2982 {
2983 struct mgmt_cp_user_passkey_reply *cp = data;
2984
2985 BT_DBG("");
2986
2987 return user_pairing_resp(sk, hdev, &cp->addr,
2988 MGMT_OP_USER_PASSKEY_REPLY,
2989 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2990 }
2991
2992 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2993 void *data, u16 len)
2994 {
2995 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2996
2997 BT_DBG("");
2998
2999 return user_pairing_resp(sk, hdev, &cp->addr,
3000 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3001 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3002 }
3003
3004 static void update_name(struct hci_request *req)
3005 {
3006 struct hci_dev *hdev = req->hdev;
3007 struct hci_cp_write_local_name cp;
3008
3009 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3010
3011 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3012 }
3013
3014 static void set_name_complete(struct hci_dev *hdev, u8 status)
3015 {
3016 struct mgmt_cp_set_local_name *cp;
3017 struct pending_cmd *cmd;
3018
3019 BT_DBG("status 0x%02x", status);
3020
3021 hci_dev_lock(hdev);
3022
3023 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3024 if (!cmd)
3025 goto unlock;
3026
3027 cp = cmd->param;
3028
3029 if (status)
3030 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3031 mgmt_status(status));
3032 else
3033 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3034 cp, sizeof(*cp));
3035
3036 mgmt_pending_remove(cmd);
3037
3038 unlock:
3039 hci_dev_unlock(hdev);
3040 }
3041
/* MGMT Set Local Name command handler.
 *
 * Stores the short name immediately.  When the controller is powered
 * the full name is written via an HCI request (updating EIR and LE
 * scan response data as appropriate); when powered off only the local
 * copy is updated and a LOCAL_NAME_CHANGED event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3110
/* MGMT Read Local OOB Data command handler.
 *
 * Asks the controller for its out-of-band pairing data.  When Secure
 * Connections is enabled the extended variant is used (it returns both
 * P-192 and P-256 values).  The mgmt reply is sent from the HCI
 * command-complete handler via the registered pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3158
/* MGMT Add Remote OOB Data command handler.
 *
 * Stores out-of-band pairing data for a remote device.  The parameter
 * length selects between the legacy format (single hash/randomizer
 * pair) and the extended format carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3206
3207 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3208 void *data, u16 len)
3209 {
3210 struct mgmt_cp_remove_remote_oob_data *cp = data;
3211 u8 status;
3212 int err;
3213
3214 BT_DBG("%s", hdev->name);
3215
3216 hci_dev_lock(hdev);
3217
3218 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3219 if (err < 0)
3220 status = MGMT_STATUS_INVALID_PARAMS;
3221 else
3222 status = MGMT_STATUS_SUCCESS;
3223
3224 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3225 status, &cp->addr, sizeof(cp->addr));
3226
3227 hci_dev_unlock(hdev);
3228 return err;
3229 }
3230
3231 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3232 {
3233 struct pending_cmd *cmd;
3234 u8 type;
3235 int err;
3236
3237 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3238
3239 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3240 if (!cmd)
3241 return -ENOENT;
3242
3243 type = hdev->discovery.type;
3244
3245 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3246 &type, sizeof(type));
3247 mgmt_pending_remove(cmd);
3248
3249 return err;
3250 }
3251
3252 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3253 {
3254 BT_DBG("status %d", status);
3255
3256 if (status) {
3257 hci_dev_lock(hdev);
3258 mgmt_start_discovery_failed(hdev, status);
3259 hci_dev_unlock(hdev);
3260 return;
3261 }
3262
3263 hci_dev_lock(hdev);
3264 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3265 hci_dev_unlock(hdev);
3266
3267 switch (hdev->discovery.type) {
3268 case DISCOV_TYPE_LE:
3269 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3270 DISCOV_LE_TIMEOUT);
3271 break;
3272
3273 case DISCOV_TYPE_INTERLEAVED:
3274 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3275 DISCOV_INTERLEAVED_TIMEOUT);
3276 break;
3277
3278 case DISCOV_TYPE_BREDR:
3279 break;
3280
3281 default:
3282 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3283 }
3284 }
3285
/* MGMT Start Discovery command handler.
 *
 * Builds and submits an HCI request appropriate to the requested
 * discovery type: a BR/EDR inquiry, an LE scan, or both (interleaved).
 * The command completes asynchronously via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and mgmt discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs the BR/EDR side */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning is not allowed while advertising is active */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3421
3422 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3423 {
3424 struct pending_cmd *cmd;
3425 int err;
3426
3427 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3428 if (!cmd)
3429 return -ENOENT;
3430
3431 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3432 &hdev->discovery.type, sizeof(hdev->discovery.type));
3433 mgmt_pending_remove(cmd);
3434
3435 return err;
3436 }
3437
3438 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3439 {
3440 BT_DBG("status %d", status);
3441
3442 hci_dev_lock(hdev);
3443
3444 if (status) {
3445 mgmt_stop_discovery_failed(hdev, status);
3446 goto unlock;
3447 }
3448
3449 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3450
3451 unlock:
3452 hci_dev_unlock(hdev);
3453 }
3454
/* MGMT Stop Discovery command handler.
 *
 * Aborts an active discovery session.  Depending on the discovery
 * state this either cancels the BR/EDR inquiry, disables the LE scan,
 * or cancels a pending remote name request.  Completion is reported
 * via stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the one used when starting */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means a BR/EDR inquiry is running;
		 * otherwise an LE scan must be active.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request outstanding: nothing to cancel, so the
		 * command can complete right away.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3546
3547 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3548 u16 len)
3549 {
3550 struct mgmt_cp_confirm_name *cp = data;
3551 struct inquiry_entry *e;
3552 int err;
3553
3554 BT_DBG("%s", hdev->name);
3555
3556 hci_dev_lock(hdev);
3557
3558 if (!hci_discovery_active(hdev)) {
3559 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3560 MGMT_STATUS_FAILED);
3561 goto failed;
3562 }
3563
3564 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3565 if (!e) {
3566 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3567 MGMT_STATUS_INVALID_PARAMS);
3568 goto failed;
3569 }
3570
3571 if (cp->name_known) {
3572 e->name_state = NAME_KNOWN;
3573 list_del(&e->list);
3574 } else {
3575 e->name_state = NAME_NEEDED;
3576 hci_inquiry_cache_update_resolve(hdev, e);
3577 }
3578
3579 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3580 sizeof(cp->addr));
3581
3582 failed:
3583 hci_dev_unlock(hdev);
3584 return err;
3585 }
3586
3587 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3588 u16 len)
3589 {
3590 struct mgmt_cp_block_device *cp = data;
3591 u8 status;
3592 int err;
3593
3594 BT_DBG("%s", hdev->name);
3595
3596 if (!bdaddr_type_is_valid(cp->addr.type))
3597 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3598 MGMT_STATUS_INVALID_PARAMS,
3599 &cp->addr, sizeof(cp->addr));
3600
3601 hci_dev_lock(hdev);
3602
3603 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3604 if (err < 0)
3605 status = MGMT_STATUS_FAILED;
3606 else
3607 status = MGMT_STATUS_SUCCESS;
3608
3609 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3610 &cp->addr, sizeof(cp->addr));
3611
3612 hci_dev_unlock(hdev);
3613
3614 return err;
3615 }
3616
3617 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3618 u16 len)
3619 {
3620 struct mgmt_cp_unblock_device *cp = data;
3621 u8 status;
3622 int err;
3623
3624 BT_DBG("%s", hdev->name);
3625
3626 if (!bdaddr_type_is_valid(cp->addr.type))
3627 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3628 MGMT_STATUS_INVALID_PARAMS,
3629 &cp->addr, sizeof(cp->addr));
3630
3631 hci_dev_lock(hdev);
3632
3633 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3634 if (err < 0)
3635 status = MGMT_STATUS_INVALID_PARAMS;
3636 else
3637 status = MGMT_STATUS_SUCCESS;
3638
3639 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3640 &cp->addr, sizeof(cp->addr));
3641
3642 hci_dev_unlock(hdev);
3643
3644 return err;
3645 }
3646
3647 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3648 u16 len)
3649 {
3650 struct mgmt_cp_set_device_id *cp = data;
3651 struct hci_request req;
3652 int err;
3653 __u16 source;
3654
3655 BT_DBG("%s", hdev->name);
3656
3657 source = __le16_to_cpu(cp->source);
3658
3659 if (source > 0x0002)
3660 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3661 MGMT_STATUS_INVALID_PARAMS);
3662
3663 hci_dev_lock(hdev);
3664
3665 hdev->devid_source = source;
3666 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3667 hdev->devid_product = __le16_to_cpu(cp->product);
3668 hdev->devid_version = __le16_to_cpu(cp->version);
3669
3670 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3671
3672 hci_req_init(&req, hdev);
3673 update_eir(&req);
3674 hci_req_run(&req, NULL);
3675
3676 hci_dev_unlock(hdev);
3677
3678 return err;
3679 }
3680
/* Request-completion callback for Set Advertising.
 *
 * On failure all pending Set Advertising commands get a status
 * response; on success they get a settings response and a New
 * Settings event is broadcast.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the originating socket */
	if (match.sk)
		sock_put(match.sk);
}
3701
/* Set Advertising command handler (MGMT_OP_SET_ADVERTISING).
 *
 * Toggles LE advertising.  When no HCI traffic is required (powered
 * off, no state change, or an LE connection exists) only the setting
 * flag is updated and the response is sent directly.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support to be enabled */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if the flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A conflicting Set Advertising or Set LE is still in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	/* Response is sent from set_advertising_complete() */
	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3779
/* Set Static Address command handler (MGMT_OP_SET_STATIC_ADDRESS).
 *
 * Stores the LE static random address.  Only allowed while the
 * controller is powered off; BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE (ff:ff:ff:ff:ff:ff) is not acceptable */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
3819
3820 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3821 void *data, u16 len)
3822 {
3823 struct mgmt_cp_set_scan_params *cp = data;
3824 __u16 interval, window;
3825 int err;
3826
3827 BT_DBG("%s", hdev->name);
3828
3829 if (!lmp_le_capable(hdev))
3830 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3831 MGMT_STATUS_NOT_SUPPORTED);
3832
3833 interval = __le16_to_cpu(cp->interval);
3834
3835 if (interval < 0x0004 || interval > 0x4000)
3836 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3837 MGMT_STATUS_INVALID_PARAMS);
3838
3839 window = __le16_to_cpu(cp->window);
3840
3841 if (window < 0x0004 || window > 0x4000)
3842 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3843 MGMT_STATUS_INVALID_PARAMS);
3844
3845 if (window > interval)
3846 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3847 MGMT_STATUS_INVALID_PARAMS);
3848
3849 hci_dev_lock(hdev);
3850
3851 hdev->le_scan_interval = interval;
3852 hdev->le_scan_window = window;
3853
3854 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3855
3856 hci_dev_unlock(hdev);
3857
3858 return err;
3859 }
3860
/* Request-completion callback for Set Fast Connectable.
 *
 * Commits the HCI_FAST_CONNECTABLE flag on success and responds to
 * the pending mgmt command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already have been removed */
	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Flag is only committed once the controller confirmed */
		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3893
/* Set Fast Connectable command handler (MGMT_OP_SET_FAST_CONNECTABLE).
 *
 * Adjusts the page scan parameters via an HCI request; the flag is
 * committed in fast_connectable_complete() once the request finishes.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Requires BR/EDR enabled and a controller newer than 1.1 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable only makes sense while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No state change: respond immediately without HCI traffic */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3958
/* Queue HCI commands that program page/inquiry scan according to the
 * current connectable and discoverable settings.
 */
static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	/* Only send the command when at least one scan mode is wanted */
	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
3978
/* Request-completion callback for Set BR/EDR.
 *
 * The HCI_BREDR_ENABLED flag was flipped optimistically in
 * set_bredr(); on failure it is rolled back here.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4010
/* Set BR/EDR command handler (MGMT_OP_SET_BREDR).
 *
 * Enables or disables BR/EDR on a dual-mode controller.  Disabling is
 * only possible while powered off; enabling while powered triggers an
 * HCI request that also refreshes scan mode and advertising data.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only dual-mode (BR/EDR + LE) controllers can toggle BR/EDR */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay usable, so BR/EDR cannot be toggled without it */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change requested: answer directly */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears every BR/EDR-only setting */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* set_bredr_complete() rolls the flag back on failure */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4100
/* Set Secure Connections command handler (MGMT_OP_SET_SECURE_CONN).
 *
 * val may be 0x00 (off), 0x01 (enabled) or 0x02 (SC-only mode).
 * While powered, the controller is informed via Write SC Support.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Allow either real controller support or the debug override */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* While powered off only the flags are toggled */
		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either SC enabled or SC-only: answer directly */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4188
4189 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4190 void *data, u16 len)
4191 {
4192 struct mgmt_mode *cp = data;
4193 bool changed;
4194 int err;
4195
4196 BT_DBG("request for %s", hdev->name);
4197
4198 if (cp->val != 0x00 && cp->val != 0x01)
4199 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4200 MGMT_STATUS_INVALID_PARAMS);
4201
4202 hci_dev_lock(hdev);
4203
4204 if (cp->val)
4205 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4206 else
4207 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4208
4209 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4210 if (err < 0)
4211 goto unlock;
4212
4213 if (changed)
4214 err = new_settings(hdev, sk);
4215
4216 unlock:
4217 hci_dev_unlock(hdev);
4218 return err;
4219 }
4220
4221 static bool irk_is_valid(struct mgmt_irk_info *irk)
4222 {
4223 switch (irk->addr.type) {
4224 case BDADDR_LE_PUBLIC:
4225 return true;
4226
4227 case BDADDR_LE_RANDOM:
4228 /* Two most significant bits shall be set */
4229 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4230 return false;
4231 return true;
4232 }
4233
4234 return false;
4235 }
4236
4237 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4238 u16 len)
4239 {
4240 struct mgmt_cp_load_irks *cp = cp_data;
4241 u16 irk_count, expected_len;
4242 int i, err;
4243
4244 BT_DBG("request for %s", hdev->name);
4245
4246 if (!lmp_le_capable(hdev))
4247 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4248 MGMT_STATUS_NOT_SUPPORTED);
4249
4250 irk_count = __le16_to_cpu(cp->irk_count);
4251
4252 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4253 if (expected_len != len) {
4254 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4255 len, expected_len);
4256 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4257 MGMT_STATUS_INVALID_PARAMS);
4258 }
4259
4260 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4261
4262 for (i = 0; i < irk_count; i++) {
4263 struct mgmt_irk_info *key = &cp->irks[i];
4264
4265 if (!irk_is_valid(key))
4266 return cmd_status(sk, hdev->id,
4267 MGMT_OP_LOAD_IRKS,
4268 MGMT_STATUS_INVALID_PARAMS);
4269 }
4270
4271 hci_dev_lock(hdev);
4272
4273 hci_smp_irks_clear(hdev);
4274
4275 for (i = 0; i < irk_count; i++) {
4276 struct mgmt_irk_info *irk = &cp->irks[i];
4277 u8 addr_type;
4278
4279 if (irk->addr.type == BDADDR_LE_PUBLIC)
4280 addr_type = ADDR_LE_DEV_PUBLIC;
4281 else
4282 addr_type = ADDR_LE_DEV_RANDOM;
4283
4284 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4285 BDADDR_ANY);
4286 }
4287
4288 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4289
4290 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4291
4292 hci_dev_unlock(hdev);
4293
4294 return err;
4295 }
4296
4297 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4298 {
4299 if (key->master != 0x00 && key->master != 0x01)
4300 return false;
4301
4302 switch (key->addr.type) {
4303 case BDADDR_LE_PUBLIC:
4304 return true;
4305
4306 case BDADDR_LE_RANDOM:
4307 /* Two most significant bits shall be set */
4308 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4309 return false;
4310 return true;
4311 }
4312
4313 return false;
4314 }
4315
4316 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4317 void *cp_data, u16 len)
4318 {
4319 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4320 u16 key_count, expected_len;
4321 int i, err;
4322
4323 BT_DBG("request for %s", hdev->name);
4324
4325 if (!lmp_le_capable(hdev))
4326 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4327 MGMT_STATUS_NOT_SUPPORTED);
4328
4329 key_count = __le16_to_cpu(cp->key_count);
4330
4331 expected_len = sizeof(*cp) + key_count *
4332 sizeof(struct mgmt_ltk_info);
4333 if (expected_len != len) {
4334 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4335 len, expected_len);
4336 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4337 MGMT_STATUS_INVALID_PARAMS);
4338 }
4339
4340 BT_DBG("%s key_count %u", hdev->name, key_count);
4341
4342 for (i = 0; i < key_count; i++) {
4343 struct mgmt_ltk_info *key = &cp->keys[i];
4344
4345 if (!ltk_is_valid(key))
4346 return cmd_status(sk, hdev->id,
4347 MGMT_OP_LOAD_LONG_TERM_KEYS,
4348 MGMT_STATUS_INVALID_PARAMS);
4349 }
4350
4351 hci_dev_lock(hdev);
4352
4353 hci_smp_ltks_clear(hdev);
4354
4355 for (i = 0; i < key_count; i++) {
4356 struct mgmt_ltk_info *key = &cp->keys[i];
4357 u8 type, addr_type;
4358
4359 if (key->addr.type == BDADDR_LE_PUBLIC)
4360 addr_type = ADDR_LE_DEV_PUBLIC;
4361 else
4362 addr_type = ADDR_LE_DEV_RANDOM;
4363
4364 if (key->master)
4365 type = HCI_SMP_LTK;
4366 else
4367 type = HCI_SMP_LTK_SLAVE;
4368
4369 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4370 key->type, key->val, key->enc_size, key->ediv,
4371 key->rand);
4372 }
4373
4374 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4375 NULL, 0);
4376
4377 hci_dev_unlock(hdev);
4378
4379 return err;
4380 }
4381
/* Dispatch table for mgmt commands, indexed by opcode.  data_len is
 * the exact expected parameter length, or the minimum length when
 * var_len is set.  Entries with func == NULL are rejected by
 * mgmt_control() with MGMT_STATUS_UNKNOWN_COMMAND.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ }, /* placeholder: opcode not implemented, rejected as unknown */
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
};
4438
4439
/* Entry point for mgmt commands received on an HCI control socket.
 *
 * Copies the message from userspace, validates the header, resolves
 * the controller index, checks parameter length against the handler
 * table and dispatches to the matching handler.  Returns the consumed
 * length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must account for the full payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user
		 * channel are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Commands below READ_INFO are global and take no index;
	 * everything from READ_INFO on requires a controller.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands only enforce a minimum size */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4532
4533 void mgmt_index_added(struct hci_dev *hdev)
4534 {
4535 if (hdev->dev_type != HCI_BREDR)
4536 return;
4537
4538 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4539 }
4540
4541 void mgmt_index_removed(struct hci_dev *hdev)
4542 {
4543 u8 status = MGMT_STATUS_INVALID_INDEX;
4544
4545 if (hdev->dev_type != HCI_BREDR)
4546 return;
4547
4548 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4549
4550 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4551 }
4552
/* Request-completion callback for the power-on initialization
 * request built by powered_update_hci(): responds to pending Set
 * Powered commands and broadcasts New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the originating socket */
	if (match.sk)
		sock_put(match.sk);
}
4570
/* Build and run the HCI request that brings a freshly powered
 * controller in sync with the current mgmt settings (SSP, LE host
 * support, addresses, advertising, auth, scan mode, class, name,
 * EIR).  Returns the result of hci_req_run(); powered_complete()
 * finishes the mgmt bookkeeping.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if the host flag is set but the
	 * controller does not have it active yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication setting with the controller */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4635
/* Notify mgmt about a power state change of the controller.
 *
 * On power-on, powered_update_hci() queues the initialization
 * request; if that succeeds the mgmt responses are sent from its
 * completion callback instead of here.  On power-off, all pending
 * commands are failed and a zero class-of-device is broadcast if
 * needed.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; completion callback
		 * will handle the responses.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report the class of device reverting to zero while off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4670
4671 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4672 {
4673 struct pending_cmd *cmd;
4674 u8 status;
4675
4676 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4677 if (!cmd)
4678 return;
4679
4680 if (err == -ERFKILL)
4681 status = MGMT_STATUS_RFKILLED;
4682 else
4683 status = MGMT_STATUS_FAILED;
4684
4685 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4686
4687 mgmt_pending_remove(cmd);
4688 }
4689
/* Timer callback fired when the discoverable timeout expires: clears
 * the discoverable flags, reprograms the controller accordingly and
 * broadcasts the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan enabled, drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4720
/* Track a discoverable state change coming from the controller side
 * and update the corresponding mgmt flags/events.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable never survives going non-discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4753
4754 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4755 {
4756 bool changed;
4757
4758 /* Nothing needed here if there's a pending command since that
4759 * commands request completion callback takes care of everything
4760 * necessary.
4761 */
4762 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4763 return;
4764
4765 if (connectable)
4766 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4767 else
4768 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4769
4770 if (changed)
4771 new_settings(hdev, NULL);
4772 }
4773
/* Handle a failed Write Scan Enable command: fail the pending mgmt
 * commands corresponding to the scan bits that were requested.
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	/* Page scan corresponds to connectable, inquiry scan to
	 * discoverable.
	 */
	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
4786
4787 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4788 bool persistent)
4789 {
4790 struct mgmt_ev_new_link_key ev;
4791
4792 memset(&ev, 0, sizeof(ev));
4793
4794 ev.store_hint = persistent;
4795 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4796 ev.key.addr.type = BDADDR_BREDR;
4797 ev.key.type = key->type;
4798 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4799 ev.key.pin_len = key->pin_len;
4800
4801 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4802 }
4803
/* Send a New Long Term Key event for a freshly distributed SMP LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Random address whose top two bits are not 11, i.e. not a
		 * static random address: don't ask user space to store it.
		 */
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* HCI_SMP_LTK keys are flagged as master keys in the event. */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4841
/* Send a New IRK event for a freshly distributed Identity Resolving Key. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
4871
4872 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4873 u8 data_len)
4874 {
4875 eir[eir_len++] = sizeof(type) + data_len;
4876 eir[eir_len++] = type;
4877 memcpy(&eir[eir_len], data, data_len);
4878 eir_len += data_len;
4879
4880 return eir_len;
4881 }
4882
/* Send a Device Connected event, attaching any known remote name and
 * class of device as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the fixed header plus the EIR data actually used. */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4909
/* Complete a pending Disconnect command and stash its socket (with a
 * held reference) so the caller can exclude it from the subsequent
 * Device Disconnected event. The caller drops the reference.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Take the socket reference before the command (which holds its
	 * own reference) is removed.
	 */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
4927
/* Complete a pending Unpair Device command with success and send the
 * Device Unpaired event to all other sockets.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4944
/* Send a Device Disconnected event and complete any pending Disconnect
 * or Unpair Device commands that were waiting for this disconnection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only ACL and LE links are reported through mgmt. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() sets sk to the socket of a completed
	 * Disconnect command, holding a reference on it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Skip sk (if set) since that socket already got a command
	 * response from disconnect_rsp().
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
4968
/* Handle a failed disconnect attempt: complete pending Unpair Device
 * commands and fail the matching pending Disconnect command, if any.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	/* Complete any pending Unpair Device commands as well. */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* The failure must match the address and address type of the
	 * pending command; otherwise leave the command pending.
	 */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5000
5001 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5002 u8 addr_type, u8 status)
5003 {
5004 struct mgmt_ev_connect_failed ev;
5005
5006 bacpy(&ev.addr.bdaddr, bdaddr);
5007 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5008 ev.status = mgmt_status(status);
5009
5010 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5011 }
5012
5013 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5014 {
5015 struct mgmt_ev_pin_code_request ev;
5016
5017 bacpy(&ev.addr.bdaddr, bdaddr);
5018 ev.addr.type = BDADDR_BREDR;
5019 ev.secure = secure;
5020
5021 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5022 }
5023
5024 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5025 u8 status)
5026 {
5027 struct pending_cmd *cmd;
5028 struct mgmt_rp_pin_code_reply rp;
5029
5030 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5031 if (!cmd)
5032 return;
5033
5034 bacpy(&rp.addr.bdaddr, bdaddr);
5035 rp.addr.type = BDADDR_BREDR;
5036
5037 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5038 mgmt_status(status), &rp, sizeof(rp));
5039
5040 mgmt_pending_remove(cmd);
5041 }
5042
5043 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5044 u8 status)
5045 {
5046 struct pending_cmd *cmd;
5047 struct mgmt_rp_pin_code_reply rp;
5048
5049 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5050 if (!cmd)
5051 return;
5052
5053 bacpy(&rp.addr.bdaddr, bdaddr);
5054 rp.addr.type = BDADDR_BREDR;
5055
5056 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5057 mgmt_status(status), &rp, sizeof(rp));
5058
5059 mgmt_pending_remove(cmd);
5060 }
5061
5062 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5063 u8 link_type, u8 addr_type, __le32 value,
5064 u8 confirm_hint)
5065 {
5066 struct mgmt_ev_user_confirm_request ev;
5067
5068 BT_DBG("%s", hdev->name);
5069
5070 bacpy(&ev.addr.bdaddr, bdaddr);
5071 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5072 ev.confirm_hint = confirm_hint;
5073 ev.value = value;
5074
5075 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5076 NULL);
5077 }
5078
5079 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5080 u8 link_type, u8 addr_type)
5081 {
5082 struct mgmt_ev_user_passkey_request ev;
5083
5084 BT_DBG("%s", hdev->name);
5085
5086 bacpy(&ev.addr.bdaddr, bdaddr);
5087 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5088
5089 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5090 NULL);
5091 }
5092
/* Common completion helper for the four user pairing reply commands
 * (confirm/passkey, positive/negative).
 *
 * Returns -ENOENT when no command with the given opcode is pending,
 * otherwise the result of cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5114
5115 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5116 u8 link_type, u8 addr_type, u8 status)
5117 {
5118 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5119 status, MGMT_OP_USER_CONFIRM_REPLY);
5120 }
5121
5122 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5123 u8 link_type, u8 addr_type, u8 status)
5124 {
5125 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5126 status,
5127 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5128 }
5129
5130 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5131 u8 link_type, u8 addr_type, u8 status)
5132 {
5133 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5134 status, MGMT_OP_USER_PASSKEY_REPLY);
5135 }
5136
5137 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5138 u8 link_type, u8 addr_type, u8 status)
5139 {
5140 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5141 status,
5142 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5143 }
5144
5145 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5146 u8 link_type, u8 addr_type, u32 passkey,
5147 u8 entered)
5148 {
5149 struct mgmt_ev_passkey_notify ev;
5150
5151 BT_DBG("%s", hdev->name);
5152
5153 bacpy(&ev.addr.bdaddr, bdaddr);
5154 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5155 ev.passkey = __cpu_to_le32(passkey);
5156 ev.entered = entered;
5157
5158 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5159 }
5160
5161 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5162 u8 addr_type, u8 status)
5163 {
5164 struct mgmt_ev_auth_failed ev;
5165
5166 bacpy(&ev.addr.bdaddr, bdaddr);
5167 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5168 ev.status = mgmt_status(status);
5169
5170 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5171 }
5172
/* Handle completion of an HCI authentication-enable change triggered by
 * a Set Link Security command.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail all pending Set Link Security commands. */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the HCI_LINK_SECURITY flag with the controller's HCI_AUTH
	 * state, tracking whether anything actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5201
5202 static void clear_eir(struct hci_request *req)
5203 {
5204 struct hci_dev *hdev = req->hdev;
5205 struct hci_cp_write_eir cp;
5206
5207 if (!lmp_ext_inq_capable(hdev))
5208 return;
5209
5210 memset(hdev->eir, 0, sizeof(hdev->eir));
5211
5212 memset(&cp, 0, sizeof(cp));
5213
5214 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5215 }
5216
/* Handle completion of an HCI Simple Pairing Mode change: sync the
 * SSP/HS flags, respond to pending Set SSP commands and refresh the
 * EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the SSP flag (and the HS flag
		 * that depends on it) if it had been set, and inform user
		 * space about the reverted settings.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP clears HS too; a change in either flag
		 * counts as a settings change.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Refresh or clear the EIR data based on the resulting SSP state. */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5265
/* Handle completion of an HCI Secure Connections support change: sync
 * the SC flags and respond to pending Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set flags
		 * and let user space know about the reverted settings.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode is cleared whenever SC itself is disabled. */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5302
5303 static void sk_lookup(struct pending_cmd *cmd, void *data)
5304 {
5305 struct cmd_lookup *match = data;
5306
5307 if (match->sk == NULL) {
5308 match->sk = cmd->sk;
5309 sock_hold(match->sk);
5310 }
5311 }
5312
5313 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5314 u8 status)
5315 {
5316 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5317
5318 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5319 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5320 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5321
5322 if (!status)
5323 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5324 NULL);
5325
5326 if (match.sk)
5327 sock_put(match.sk);
5328 }
5329
/* Handle completion of a local-name change and propagate the new name
 * via a Local Name Changed event.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending: the change came from elsewhere,
		 * so store the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket (if any) when sending the event. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5356
/* Deliver the result of a Read Local OOB Data command. When Secure
 * Connections is enabled and the 256-bit values are available, the
 * extended response with both the 192 and 256 variants is used;
 * otherwise only the 192 values are returned.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5403
/* Send a Device Found event for a discovery result, reporting the
 * identity address when a stored IRK resolves the advertised one.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only of interest while discovery is running. */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Use the identity address if an IRK matches this device. */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5449
5450 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5451 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5452 {
5453 struct mgmt_ev_device_found *ev;
5454 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5455 u16 eir_len;
5456
5457 ev = (struct mgmt_ev_device_found *) buf;
5458
5459 memset(buf, 0, sizeof(buf));
5460
5461 bacpy(&ev->addr.bdaddr, bdaddr);
5462 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5463 ev->rssi = rssi;
5464
5465 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5466 name_len);
5467
5468 ev->eir_len = cpu_to_le16(eir_len);
5469
5470 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5471 }
5472
/* Report a discovery state change: complete any pending Start/Stop
 * Discovery command and send a Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		/* The command response carries the discovery type. */
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5499
5500 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5501 {
5502 struct pending_cmd *cmd;
5503 struct mgmt_ev_device_blocked ev;
5504
5505 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5506
5507 bacpy(&ev.addr.bdaddr, bdaddr);
5508 ev.addr.type = type;
5509
5510 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5511 cmd ? cmd->sk : NULL);
5512 }
5513
5514 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5515 {
5516 struct pending_cmd *cmd;
5517 struct mgmt_ev_device_unblocked ev;
5518
5519 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5520
5521 bacpy(&ev.addr.bdaddr, bdaddr);
5522 ev.addr.type = type;
5523
5524 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5525 cmd ? cmd->sk : NULL);
5526 }
5527
5528 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5529 {
5530 BT_DBG("%s status %u", hdev->name, status);
5531
5532 /* Clear the advertising mgmt setting if we failed to re-enable it */
5533 if (status) {
5534 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5535 new_settings(hdev, NULL);
5536 }
5537 }
5538
/* Re-enable LE advertising once the last LE connection has gone away,
 * provided the advertising setting is still enabled.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Only re-enable once no LE links remain. */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}