1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
41 MGMT_OP_READ_INFO,
42 MGMT_OP_SET_POWERED,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
46 MGMT_OP_SET_PAIRABLE,
47 MGMT_OP_SET_LINK_SECURITY,
48 MGMT_OP_SET_SSP,
49 MGMT_OP_SET_HS,
50 MGMT_OP_SET_LE,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
53 MGMT_OP_ADD_UUID,
54 MGMT_OP_REMOVE_UUID,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
57 MGMT_OP_DISCONNECT,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
62 MGMT_OP_PAIR_DEVICE,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
74 MGMT_OP_CONFIRM_NAME,
75 MGMT_OP_BLOCK_DEVICE,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
79 MGMT_OP_SET_BREDR,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
83 MGMT_OP_SET_DEBUG_KEYS,
84 MGMT_OP_LOAD_IRKS,
85 };
86
87 static const u16 mgmt_events[] = {
88 MGMT_EV_CONTROLLER_ERROR,
89 MGMT_EV_INDEX_ADDED,
90 MGMT_EV_INDEX_REMOVED,
91 MGMT_EV_NEW_SETTINGS,
92 MGMT_EV_CLASS_OF_DEV_CHANGED,
93 MGMT_EV_LOCAL_NAME_CHANGED,
94 MGMT_EV_NEW_LINK_KEY,
95 MGMT_EV_NEW_LONG_TERM_KEY,
96 MGMT_EV_DEVICE_CONNECTED,
97 MGMT_EV_DEVICE_DISCONNECTED,
98 MGMT_EV_CONNECT_FAILED,
99 MGMT_EV_PIN_CODE_REQUEST,
100 MGMT_EV_USER_CONFIRM_REQUEST,
101 MGMT_EV_USER_PASSKEY_REQUEST,
102 MGMT_EV_AUTH_FAILED,
103 MGMT_EV_DEVICE_FOUND,
104 MGMT_EV_DISCOVERING,
105 MGMT_EV_DEVICE_BLOCKED,
106 MGMT_EV_DEVICE_UNBLOCKED,
107 MGMT_EV_DEVICE_UNPAIRED,
108 MGMT_EV_PASSKEY_NOTIFY,
109 MGMT_EV_NEW_IRK,
110 };
111
112 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
113
114 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
115 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
116
117 struct pending_cmd {
118 struct list_head list;
119 u16 opcode;
120 int index;
121 void *param;
122 struct sock *sk;
123 void *user_data;
124 };
125
126 /* HCI to MGMT error code conversion table */
127 static u8 mgmt_status_table[] = {
128 MGMT_STATUS_SUCCESS,
129 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
130 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
131 MGMT_STATUS_FAILED, /* Hardware Failure */
132 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
133 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
134 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
135 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
136 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
137 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
138 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
139 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
140 MGMT_STATUS_BUSY, /* Command Disallowed */
141 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
142 MGMT_STATUS_REJECTED, /* Rejected Security */
143 MGMT_STATUS_REJECTED, /* Rejected Personal */
144 MGMT_STATUS_TIMEOUT, /* Host Timeout */
145 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
146 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
147 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
148 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
149 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
150 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
151 MGMT_STATUS_BUSY, /* Repeated Attempts */
152 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
153 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
154 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
155 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
156 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
157 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
158 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
159 MGMT_STATUS_FAILED, /* Unspecified Error */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
161 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
162 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
163 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
164 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
165 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
166 MGMT_STATUS_FAILED, /* Unit Link Key Used */
167 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
168 MGMT_STATUS_TIMEOUT, /* Instant Passed */
169 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
170 MGMT_STATUS_FAILED, /* Transaction Collision */
171 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
172 MGMT_STATUS_REJECTED, /* QoS Rejected */
173 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
174 MGMT_STATUS_REJECTED, /* Insufficient Security */
175 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
176 MGMT_STATUS_BUSY, /* Role Switch Pending */
177 MGMT_STATUS_FAILED, /* Slot Violation */
178 MGMT_STATUS_FAILED, /* Role Switch Failed */
179 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
180 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
181 MGMT_STATUS_BUSY, /* Host Busy Pairing */
182 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
183 MGMT_STATUS_BUSY, /* Controller Busy */
184 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
185 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
186 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
187 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
188 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
189 };
190
191 static u8 mgmt_status(u8 hci_status)
192 {
193 if (hci_status < ARRAY_SIZE(mgmt_status_table))
194 return mgmt_status_table[hci_status];
195
196 return MGMT_STATUS_FAILED;
197 }
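/* Worked example of the table lookup above (values taken from the
 * mgmt_status_table entries; the HCI status code is the table index):
 * an HCI status of 0x05 ("Authentication Failure", index 5) maps to
 * MGMT_STATUS_AUTH_FAILED, while any out-of-range code falls back to
 * MGMT_STATUS_FAILED.
 */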
198
199 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 {
201 struct sk_buff *skb;
202 struct mgmt_hdr *hdr;
203 struct mgmt_ev_cmd_status *ev;
204 int err;
205
206 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
207
208 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
209 if (!skb)
210 return -ENOMEM;
211
212 hdr = (void *) skb_put(skb, sizeof(*hdr));
213
214 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
215 hdr->index = cpu_to_le16(index);
216 hdr->len = cpu_to_le16(sizeof(*ev));
217
218 ev = (void *) skb_put(skb, sizeof(*ev));
219 ev->status = status;
220 ev->opcode = cpu_to_le16(cmd);
221
222 err = sock_queue_rcv_skb(sk, skb);
223 if (err < 0)
224 kfree_skb(skb);
225
226 return err;
227 }
228
229 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
230 void *rp, size_t rp_len)
231 {
232 struct sk_buff *skb;
233 struct mgmt_hdr *hdr;
234 struct mgmt_ev_cmd_complete *ev;
235 int err;
236
237 BT_DBG("sock %p", sk);
238
239 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
240 if (!skb)
241 return -ENOMEM;
242
243 hdr = (void *) skb_put(skb, sizeof(*hdr));
244
245 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
246 hdr->index = cpu_to_le16(index);
247 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
248
249 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
250 ev->opcode = cpu_to_le16(cmd);
251 ev->status = status;
252
253 if (rp)
254 memcpy(ev->data, rp, rp_len);
255
256 err = sock_queue_rcv_skb(sk, skb);
257 if (err < 0)
258 kfree_skb(skb);
259
260 return err;
261 }
262
263 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 u16 data_len)
265 {
266 struct mgmt_rp_read_version rp;
267
268 BT_DBG("sock %p", sk);
269
270 rp.version = MGMT_VERSION;
271 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
272
273 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
274 sizeof(rp));
275 }
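/* For reference, a minimal sketch of the reply that cmd_complete() frames
 * for the READ_VERSION case above (all multi-byte fields little-endian,
 * sizes per struct mgmt_hdr and struct mgmt_ev_cmd_complete):
 *
 *	hdr.opcode = MGMT_EV_CMD_COMPLETE
 *	hdr.index  = MGMT_INDEX_NONE
 *	hdr.len    = sizeof(struct mgmt_ev_cmd_complete) +
 *		     sizeof(struct mgmt_rp_read_version)
 *	ev.opcode  = MGMT_OP_READ_VERSION
 *	ev.status  = 0x00
 *	ev.data    = { .version = MGMT_VERSION, .revision = MGMT_REVISION }
 */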
276
277 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 u16 data_len)
279 {
280 struct mgmt_rp_read_commands *rp;
281 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
282 const u16 num_events = ARRAY_SIZE(mgmt_events);
283 __le16 *opcode;
284 size_t rp_size;
285 int i, err;
286
287 BT_DBG("sock %p", sk);
288
289 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
290
291 rp = kmalloc(rp_size, GFP_KERNEL);
292 if (!rp)
293 return -ENOMEM;
294
295 rp->num_commands = __constant_cpu_to_le16(num_commands);
296 rp->num_events = __constant_cpu_to_le16(num_events);
297
298 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
299 put_unaligned_le16(mgmt_commands[i], opcode);
300
301 for (i = 0; i < num_events; i++, opcode++)
302 put_unaligned_le16(mgmt_events[i], opcode);
303
304 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
305 rp_size);
306 kfree(rp);
307
308 return err;
309 }
310
311 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_index_list *rp;
315 struct hci_dev *d;
316 size_t rp_len;
317 u16 count;
318 int err;
319
320 BT_DBG("sock %p", sk);
321
322 read_lock(&hci_dev_list_lock);
323
324 count = 0;
325 list_for_each_entry(d, &hci_dev_list, list) {
326 if (d->dev_type == HCI_BREDR)
327 count++;
328 }
329
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
332 if (!rp) {
333 read_unlock(&hci_dev_list_lock);
334 return -ENOMEM;
335 }
336
337 count = 0;
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
340 continue;
341
342 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
343 continue;
344
345 if (d->dev_type == HCI_BREDR) {
346 rp->index[count++] = cpu_to_le16(d->id);
347 BT_DBG("Added hci%u", d->id);
348 }
349 }
350
351 rp->num_controllers = cpu_to_le16(count);
352 rp_len = sizeof(*rp) + (2 * count);
353
354 read_unlock(&hci_dev_list_lock);
355
356 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
357 rp_len);
358
359 kfree(rp);
360
361 return err;
362 }
363
364 static u32 get_supported_settings(struct hci_dev *hdev)
365 {
366 u32 settings = 0;
367
368 settings |= MGMT_SETTING_POWERED;
369 settings |= MGMT_SETTING_PAIRABLE;
370 settings |= MGMT_SETTING_DEBUG_KEYS;
371
372 if (lmp_bredr_capable(hdev)) {
373 settings |= MGMT_SETTING_CONNECTABLE;
374 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
375 settings |= MGMT_SETTING_FAST_CONNECTABLE;
376 settings |= MGMT_SETTING_DISCOVERABLE;
377 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY;
379
380 if (lmp_ssp_capable(hdev)) {
381 settings |= MGMT_SETTING_SSP;
382 settings |= MGMT_SETTING_HS;
383 }
384
385 if (lmp_sc_capable(hdev) ||
386 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
387 settings |= MGMT_SETTING_SECURE_CONN;
388 }
389
390 if (lmp_le_capable(hdev)) {
391 settings |= MGMT_SETTING_LE;
392 settings |= MGMT_SETTING_ADVERTISING;
393 }
394
395 return settings;
396 }
397
398 static u32 get_current_settings(struct hci_dev *hdev)
399 {
400 u32 settings = 0;
401
402 if (hdev_is_powered(hdev))
403 settings |= MGMT_SETTING_POWERED;
404
405 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_CONNECTABLE;
407
408 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_FAST_CONNECTABLE;
410
411 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
412 settings |= MGMT_SETTING_DISCOVERABLE;
413
414 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
415 settings |= MGMT_SETTING_PAIRABLE;
416
417 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
418 settings |= MGMT_SETTING_BREDR;
419
420 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_LE;
422
423 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
424 settings |= MGMT_SETTING_LINK_SECURITY;
425
426 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
427 settings |= MGMT_SETTING_SSP;
428
429 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
430 settings |= MGMT_SETTING_HS;
431
432 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
433 settings |= MGMT_SETTING_ADVERTISING;
434
435 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
436 settings |= MGMT_SETTING_SECURE_CONN;
437
438 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
439 settings |= MGMT_SETTING_DEBUG_KEYS;
440
441 return settings;
442 }
443
444 #define PNP_INFO_SVCLASS_ID 0x1200
445
446 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
447 {
448 u8 *ptr = data, *uuids_start = NULL;
449 struct bt_uuid *uuid;
450
451 if (len < 4)
452 return ptr;
453
454 list_for_each_entry(uuid, &hdev->uuids, list) {
455 u16 uuid16;
456
457 if (uuid->size != 16)
458 continue;
459
460 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
461 if (uuid16 < 0x1100)
462 continue;
463
464 if (uuid16 == PNP_INFO_SVCLASS_ID)
465 continue;
466
467 if (!uuids_start) {
468 uuids_start = ptr;
469 uuids_start[0] = 1;
470 uuids_start[1] = EIR_UUID16_ALL;
471 ptr += 2;
472 }
473
474 /* Stop if not enough space to put next UUID */
475 if ((ptr - data) + sizeof(u16) > len) {
476 uuids_start[1] = EIR_UUID16_SOME;
477 break;
478 }
479
480 *ptr++ = (uuid16 & 0x00ff);
481 *ptr++ = (uuid16 & 0xff00) >> 8;
482 uuids_start[0] += sizeof(uuid16);
483 }
484
485 return ptr;
486 }
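/* Sketch of the EIR block the helper above emits for a single registered
 * 16-bit UUID such as 0x1101 (Serial Port): the length octet counts the
 * type octet plus the UUID data, and each UUID is stored little-endian:
 *
 *	data[0] = 0x03			length (1 type + 2 UUID bytes)
 *	data[1] = EIR_UUID16_ALL	complete 16-bit UUID list
 *	data[2] = 0x01			UUID low byte
 *	data[3] = 0x11			UUID high byte
 */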
487
488 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
489 {
490 u8 *ptr = data, *uuids_start = NULL;
491 struct bt_uuid *uuid;
492
493 if (len < 6)
494 return ptr;
495
496 list_for_each_entry(uuid, &hdev->uuids, list) {
497 if (uuid->size != 32)
498 continue;
499
500 if (!uuids_start) {
501 uuids_start = ptr;
502 uuids_start[0] = 1;
503 uuids_start[1] = EIR_UUID32_ALL;
504 ptr += 2;
505 }
506
507 /* Stop if not enough space to put next UUID */
508 if ((ptr - data) + sizeof(u32) > len) {
509 uuids_start[1] = EIR_UUID32_SOME;
510 break;
511 }
512
513 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
514 ptr += sizeof(u32);
515 uuids_start[0] += sizeof(u32);
516 }
517
518 return ptr;
519 }
520
521 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
522 {
523 u8 *ptr = data, *uuids_start = NULL;
524 struct bt_uuid *uuid;
525
526 if (len < 18)
527 return ptr;
528
529 list_for_each_entry(uuid, &hdev->uuids, list) {
530 if (uuid->size != 128)
531 continue;
532
533 if (!uuids_start) {
534 uuids_start = ptr;
535 uuids_start[0] = 1;
536 uuids_start[1] = EIR_UUID128_ALL;
537 ptr += 2;
538 }
539
540 /* Stop if not enough space to put next UUID */
541 if ((ptr - data) + 16 > len) {
542 uuids_start[1] = EIR_UUID128_SOME;
543 break;
544 }
545
546 memcpy(ptr, uuid->uuid, 16);
547 ptr += 16;
548 uuids_start[0] += 16;
549 }
550
551 return ptr;
552 }
553
554 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
555 {
556 struct pending_cmd *cmd;
557
558 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
559 if (cmd->opcode == opcode)
560 return cmd;
561 }
562
563 return NULL;
564 }
565
566 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
567 {
568 u8 ad_len = 0;
569 size_t name_len;
570
571 name_len = strlen(hdev->dev_name);
572 if (name_len > 0) {
573 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
574
575 if (name_len > max_len) {
576 name_len = max_len;
577 ptr[1] = EIR_NAME_SHORT;
578 } else
579 ptr[1] = EIR_NAME_COMPLETE;
580
581 ptr[0] = name_len + 1;
582
583 memcpy(ptr + 2, hdev->dev_name, name_len);
584
585 ad_len += (name_len + 2);
586 ptr += (name_len + 2);
587 }
588
589 return ad_len;
590 }
591
592 static void update_scan_rsp_data(struct hci_request *req)
593 {
594 struct hci_dev *hdev = req->hdev;
595 struct hci_cp_le_set_scan_rsp_data cp;
596 u8 len;
597
598 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
599 return;
600
601 memset(&cp, 0, sizeof(cp));
602
603 len = create_scan_rsp_data(hdev, cp.data);
604
605 if (hdev->scan_rsp_data_len == len &&
606 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
607 return;
608
609 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
610 hdev->scan_rsp_data_len = len;
611
612 cp.length = len;
613
614 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
615 }
616
617 static u8 get_adv_discov_flags(struct hci_dev *hdev)
618 {
619 struct pending_cmd *cmd;
620
621 /* If there's a pending mgmt command the flags will not yet have
622 * their final values, so check for this first.
623 */
624 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
625 if (cmd) {
626 struct mgmt_mode *cp = cmd->param;
627 if (cp->val == 0x01)
628 return LE_AD_GENERAL;
629 else if (cp->val == 0x02)
630 return LE_AD_LIMITED;
631 } else {
632 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
633 return LE_AD_LIMITED;
634 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
635 return LE_AD_GENERAL;
636 }
637
638 return 0;
639 }
640
641 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
642 {
643 u8 ad_len = 0, flags = 0;
644
645 flags |= get_adv_discov_flags(hdev);
646
647 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
648 flags |= LE_AD_NO_BREDR;
649
650 if (flags) {
651 BT_DBG("adv flags 0x%02x", flags);
652
653 ptr[0] = 2;
654 ptr[1] = EIR_FLAGS;
655 ptr[2] = flags;
656
657 ad_len += 3;
658 ptr += 3;
659 }
660
661 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
662 ptr[0] = 2;
663 ptr[1] = EIR_TX_POWER;
664 ptr[2] = (u8) hdev->adv_tx_power;
665
666 ad_len += 3;
667 ptr += 3;
668 }
669
670 return ad_len;
671 }
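/* Example of the advertising data produced above for an LE-only controller
 * in general discoverable mode with a valid TX power (a sketch; the flag
 * and type values come directly from the code above):
 *
 *	{ 0x02, EIR_FLAGS,    LE_AD_GENERAL | LE_AD_NO_BREDR }
 *	{ 0x02, EIR_TX_POWER, hdev->adv_tx_power }
 */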
672
673 static void update_adv_data(struct hci_request *req)
674 {
675 struct hci_dev *hdev = req->hdev;
676 struct hci_cp_le_set_adv_data cp;
677 u8 len;
678
679 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
680 return;
681
682 memset(&cp, 0, sizeof(cp));
683
684 len = create_adv_data(hdev, cp.data);
685
686 if (hdev->adv_data_len == len &&
687 memcmp(cp.data, hdev->adv_data, len) == 0)
688 return;
689
690 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
691 hdev->adv_data_len = len;
692
693 cp.length = len;
694
695 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
696 }
697
698 static void create_eir(struct hci_dev *hdev, u8 *data)
699 {
700 u8 *ptr = data;
701 size_t name_len;
702
703 name_len = strlen(hdev->dev_name);
704
705 if (name_len > 0) {
706 /* EIR Data type */
707 if (name_len > 48) {
708 name_len = 48;
709 ptr[1] = EIR_NAME_SHORT;
710 } else
711 ptr[1] = EIR_NAME_COMPLETE;
712
713 /* EIR Data length */
714 ptr[0] = name_len + 1;
715
716 memcpy(ptr + 2, hdev->dev_name, name_len);
717
718 ptr += (name_len + 2);
719 }
720
721 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
722 ptr[0] = 2;
723 ptr[1] = EIR_TX_POWER;
724 ptr[2] = (u8) hdev->inq_tx_power;
725
726 ptr += 3;
727 }
728
729 if (hdev->devid_source > 0) {
730 ptr[0] = 9;
731 ptr[1] = EIR_DEVICE_ID;
732
733 put_unaligned_le16(hdev->devid_source, ptr + 2);
734 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
735 put_unaligned_le16(hdev->devid_product, ptr + 6);
736 put_unaligned_le16(hdev->devid_version, ptr + 8);
737
738 ptr += 10;
739 }
740
741 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
742 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
743 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
744 }
745
746 static void update_eir(struct hci_request *req)
747 {
748 struct hci_dev *hdev = req->hdev;
749 struct hci_cp_write_eir cp;
750
751 if (!hdev_is_powered(hdev))
752 return;
753
754 if (!lmp_ext_inq_capable(hdev))
755 return;
756
757 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
758 return;
759
760 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
761 return;
762
763 memset(&cp, 0, sizeof(cp));
764
765 create_eir(hdev, cp.data);
766
767 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
768 return;
769
770 memcpy(hdev->eir, cp.data, sizeof(cp.data));
771
772 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
773 }
774
775 static u8 get_service_classes(struct hci_dev *hdev)
776 {
777 struct bt_uuid *uuid;
778 u8 val = 0;
779
780 list_for_each_entry(uuid, &hdev->uuids, list)
781 val |= uuid->svc_hint;
782
783 return val;
784 }
785
786 static void update_class(struct hci_request *req)
787 {
788 struct hci_dev *hdev = req->hdev;
789 u8 cod[3];
790
791 BT_DBG("%s", hdev->name);
792
793 if (!hdev_is_powered(hdev))
794 return;
795
796 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
797 return;
798
799 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
800 return;
801
802 cod[0] = hdev->minor_class;
803 cod[1] = hdev->major_class;
804 cod[2] = get_service_classes(hdev);
805
806 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
807 cod[1] |= 0x20;
808
809 if (memcmp(cod, hdev->dev_class, 3) == 0)
810 return;
811
812 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
813 }
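/* The three-byte class of device built above is sent low byte first:
 * cod[0] carries the minor class, cod[1] the major class and cod[2] the
 * service-class bits collected from the registered UUIDs' svc_hint
 * values. ORing 0x20 into cod[1] sets the bit that advertises limited
 * discoverable mode.
 */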
814
815 static void service_cache_off(struct work_struct *work)
816 {
817 struct hci_dev *hdev = container_of(work, struct hci_dev,
818 service_cache.work);
819 struct hci_request req;
820
821 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
822 return;
823
824 hci_req_init(&req, hdev);
825
826 hci_dev_lock(hdev);
827
828 update_eir(&req);
829 update_class(&req);
830
831 hci_dev_unlock(hdev);
832
833 hci_req_run(&req, NULL);
834 }
835
836 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
837 {
838 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
839 return;
840
841 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
842
843 /* Non-mgmt controlled devices get this bit set
844 * implicitly so that pairing works for them; however,
845 * for mgmt we require user-space to explicitly enable
846 * it.
847 */
848 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
849 }
850
851 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
852 void *data, u16 data_len)
853 {
854 struct mgmt_rp_read_info rp;
855
856 BT_DBG("sock %p %s", sk, hdev->name);
857
858 hci_dev_lock(hdev);
859
860 memset(&rp, 0, sizeof(rp));
861
862 bacpy(&rp.bdaddr, &hdev->bdaddr);
863
864 rp.version = hdev->hci_ver;
865 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
866
867 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
868 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
869
870 memcpy(rp.dev_class, hdev->dev_class, 3);
871
872 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
873 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
874
875 hci_dev_unlock(hdev);
876
877 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
878 sizeof(rp));
879 }
880
881 static void mgmt_pending_free(struct pending_cmd *cmd)
882 {
883 sock_put(cmd->sk);
884 kfree(cmd->param);
885 kfree(cmd);
886 }
887
888 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
889 struct hci_dev *hdev, void *data,
890 u16 len)
891 {
892 struct pending_cmd *cmd;
893
894 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
895 if (!cmd)
896 return NULL;
897
898 cmd->opcode = opcode;
899 cmd->index = hdev->id;
900
901 cmd->param = kmalloc(len, GFP_KERNEL);
902 if (!cmd->param) {
903 kfree(cmd);
904 return NULL;
905 }
906
907 if (data)
908 memcpy(cmd->param, data, len);
909
910 cmd->sk = sk;
911 sock_hold(sk);
912
913 list_add(&cmd->list, &hdev->mgmt_pending);
914
915 return cmd;
916 }
917
918 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
919 void (*cb)(struct pending_cmd *cmd,
920 void *data),
921 void *data)
922 {
923 struct pending_cmd *cmd, *tmp;
924
925 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
926 if (opcode > 0 && cmd->opcode != opcode)
927 continue;
928
929 cb(cmd, data);
930 }
931 }
932
933 static void mgmt_pending_remove(struct pending_cmd *cmd)
934 {
935 list_del(&cmd->list);
936 mgmt_pending_free(cmd);
937 }
938
939 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
940 {
941 __le32 settings = cpu_to_le32(get_current_settings(hdev));
942
943 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
944 sizeof(settings));
945 }
946
947 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
948 u16 len)
949 {
950 struct mgmt_mode *cp = data;
951 struct pending_cmd *cmd;
952 int err;
953
954 BT_DBG("request for %s", hdev->name);
955
956 if (cp->val != 0x00 && cp->val != 0x01)
957 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
958 MGMT_STATUS_INVALID_PARAMS);
959
960 hci_dev_lock(hdev);
961
962 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
963 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
964 MGMT_STATUS_BUSY);
965 goto failed;
966 }
967
968 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
969 cancel_delayed_work(&hdev->power_off);
970
971 if (cp->val) {
972 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
973 data, len);
974 err = mgmt_powered(hdev, 1);
975 goto failed;
976 }
977 }
978
979 if (!!cp->val == hdev_is_powered(hdev)) {
980 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
981 goto failed;
982 }
983
984 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
985 if (!cmd) {
986 err = -ENOMEM;
987 goto failed;
988 }
989
990 if (cp->val)
991 queue_work(hdev->req_workqueue, &hdev->power_on);
992 else
993 queue_work(hdev->req_workqueue, &hdev->power_off.work);
994
995 err = 0;
996
997 failed:
998 hci_dev_unlock(hdev);
999 return err;
1000 }
1001
1002 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1003 struct sock *skip_sk)
1004 {
1005 struct sk_buff *skb;
1006 struct mgmt_hdr *hdr;
1007
1008 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1009 if (!skb)
1010 return -ENOMEM;
1011
1012 hdr = (void *) skb_put(skb, sizeof(*hdr));
1013 hdr->opcode = cpu_to_le16(event);
1014 if (hdev)
1015 hdr->index = cpu_to_le16(hdev->id);
1016 else
1017 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1018 hdr->len = cpu_to_le16(data_len);
1019
1020 if (data)
1021 memcpy(skb_put(skb, data_len), data, data_len);
1022
1023 /* Time stamp */
1024 __net_timestamp(skb);
1025
1026 hci_send_to_control(skb, skip_sk);
1027 kfree_skb(skb);
1028
1029 return 0;
1030 }
1031
1032 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1033 {
1034 __le32 ev;
1035
1036 ev = cpu_to_le32(get_current_settings(hdev));
1037
1038 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1039 }
1040
1041 struct cmd_lookup {
1042 struct sock *sk;
1043 struct hci_dev *hdev;
1044 u8 mgmt_status;
1045 };
1046
1047 static void settings_rsp(struct pending_cmd *cmd, void *data)
1048 {
1049 struct cmd_lookup *match = data;
1050
1051 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1052
1053 list_del(&cmd->list);
1054
1055 if (match->sk == NULL) {
1056 match->sk = cmd->sk;
1057 sock_hold(match->sk);
1058 }
1059
1060 mgmt_pending_free(cmd);
1061 }
1062
1063 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1064 {
1065 u8 *status = data;
1066
1067 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1068 mgmt_pending_remove(cmd);
1069 }
1070
1071 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1072 {
1073 if (!lmp_bredr_capable(hdev))
1074 return MGMT_STATUS_NOT_SUPPORTED;
1075 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1076 return MGMT_STATUS_REJECTED;
1077 else
1078 return MGMT_STATUS_SUCCESS;
1079 }
1080
1081 static u8 mgmt_le_support(struct hci_dev *hdev)
1082 {
1083 if (!lmp_le_capable(hdev))
1084 return MGMT_STATUS_NOT_SUPPORTED;
1085 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1086 return MGMT_STATUS_REJECTED;
1087 else
1088 return MGMT_STATUS_SUCCESS;
1089 }
1090
1091 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1092 {
1093 struct pending_cmd *cmd;
1094 struct mgmt_mode *cp;
1095 struct hci_request req;
1096 bool changed;
1097
1098 BT_DBG("status 0x%02x", status);
1099
1100 hci_dev_lock(hdev);
1101
1102 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1103 if (!cmd)
1104 goto unlock;
1105
1106 if (status) {
1107 u8 mgmt_err = mgmt_status(status);
1108 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1109 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1110 goto remove_cmd;
1111 }
1112
1113 cp = cmd->param;
1114 if (cp->val) {
1115 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1116 &hdev->dev_flags);
1117
1118 if (hdev->discov_timeout > 0) {
1119 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1120 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1121 to);
1122 }
1123 } else {
1124 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1125 &hdev->dev_flags);
1126 }
1127
1128 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1129
1130 if (changed)
1131 new_settings(hdev, cmd->sk);
1132
1133 /* When the discoverable mode gets changed, make sure
1134 * that class of device has the limited discoverable
1135 * bit correctly set.
1136 */
1137 hci_req_init(&req, hdev);
1138 update_class(&req);
1139 hci_req_run(&req, NULL);
1140
1141 remove_cmd:
1142 mgmt_pending_remove(cmd);
1143
1144 unlock:
1145 hci_dev_unlock(hdev);
1146 }
1147
1148 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1149 u16 len)
1150 {
1151 struct mgmt_cp_set_discoverable *cp = data;
1152 struct pending_cmd *cmd;
1153 struct hci_request req;
1154 u16 timeout;
1155 u8 scan;
1156 int err;
1157
1158 BT_DBG("request for %s", hdev->name);
1159
1160 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1161 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1162 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1163 MGMT_STATUS_REJECTED);
1164
1165 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1166 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1167 MGMT_STATUS_INVALID_PARAMS);
1168
1169 timeout = __le16_to_cpu(cp->timeout);
1170
1171 /* Disabling discoverable requires that no timeout is set,
1172 * and enabling limited discoverable requires a timeout.
1173 */
1174 if ((cp->val == 0x00 && timeout > 0) ||
1175 (cp->val == 0x02 && timeout == 0))
1176 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1177 MGMT_STATUS_INVALID_PARAMS);
1178
1179 hci_dev_lock(hdev);
1180
1181 if (!hdev_is_powered(hdev) && timeout > 0) {
1182 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1183 MGMT_STATUS_NOT_POWERED);
1184 goto failed;
1185 }
1186
1187 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1188 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1189 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1190 MGMT_STATUS_BUSY);
1191 goto failed;
1192 }
1193
1194 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1195 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1196 MGMT_STATUS_REJECTED);
1197 goto failed;
1198 }
1199
1200 if (!hdev_is_powered(hdev)) {
1201 bool changed = false;
1202
1203 /* Setting limited discoverable when powered off is
1204 * not a valid operation since it requires a timeout
1205 * and so there is no need to check HCI_LIMITED_DISCOVERABLE.
1206 */
1207 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1208 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1209 changed = true;
1210 }
1211
1212 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1213 if (err < 0)
1214 goto failed;
1215
1216 if (changed)
1217 err = new_settings(hdev, sk);
1218
1219 goto failed;
1220 }
1221
1222 /* If the current mode is the same, then just update the timeout
1223 * value with the new value. And if only the timeout gets updated,
1224 * then no need for any HCI transactions.
1225 */
1226 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1227 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1228 &hdev->dev_flags)) {
1229 cancel_delayed_work(&hdev->discov_off);
1230 hdev->discov_timeout = timeout;
1231
1232 if (cp->val && hdev->discov_timeout > 0) {
1233 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1234 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1235 to);
1236 }
1237
1238 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1239 goto failed;
1240 }
1241
1242 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1243 if (!cmd) {
1244 err = -ENOMEM;
1245 goto failed;
1246 }
1247
1248 /* Cancel any potential discoverable timeout that might still
1249 * be active and store the new timeout value. The arming of
1250 * the timeout happens in the complete handler.
1251 */
1252 cancel_delayed_work(&hdev->discov_off);
1253 hdev->discov_timeout = timeout;
1254
1255 /* Limited discoverable mode */
1256 if (cp->val == 0x02)
1257 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1258 else
1259 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1260
1261 hci_req_init(&req, hdev);
1262
1263 /* The procedure for LE-only controllers is much simpler - just
1264 * update the advertising data.
1265 */
1266 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1267 goto update_ad;
1268
1269 scan = SCAN_PAGE;
1270
1271 if (cp->val) {
1272 struct hci_cp_write_current_iac_lap hci_cp;
1273
1274 if (cp->val == 0x02) {
1275 /* Limited discoverable mode */
1276 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1277 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1278 hci_cp.iac_lap[1] = 0x8b;
1279 hci_cp.iac_lap[2] = 0x9e;
1280 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1281 hci_cp.iac_lap[4] = 0x8b;
1282 hci_cp.iac_lap[5] = 0x9e;
1283 } else {
1284 /* General discoverable mode */
1285 hci_cp.num_iac = 1;
1286 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1287 hci_cp.iac_lap[1] = 0x8b;
1288 hci_cp.iac_lap[2] = 0x9e;
1289 }
1290
1291 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1292 (hci_cp.num_iac * 3) + 1, &hci_cp);
1293
1294 scan |= SCAN_INQUIRY;
1295 } else {
1296 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1297 }
1298
1299 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1300
1301 update_ad:
1302 update_adv_data(&req);
1303
1304 err = hci_req_run(&req, set_discoverable_complete);
1305 if (err < 0)
1306 mgmt_pending_remove(cmd);
1307
1308 failed:
1309 hci_dev_unlock(hdev);
1310 return err;
1311 }
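/* Note on the IAC values written above: the inquiry access codes are
 * 24-bit LAPs sent little-endian, so { 0x33, 0x8b, 0x9e } is the GIAC
 * 0x9e8b33 and { 0x00, 0x8b, 0x9e } is the LIAC 0x9e8b00.
 */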
1312
1313 static void write_fast_connectable(struct hci_request *req, bool enable)
1314 {
1315 struct hci_dev *hdev = req->hdev;
1316 struct hci_cp_write_page_scan_activity acp;
1317 u8 type;
1318
1319 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1320 return;
1321
1322 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1323 return;
1324
1325 if (enable) {
1326 type = PAGE_SCAN_TYPE_INTERLACED;
1327
1328 /* 160 msec page scan interval */
1329 acp.interval = __constant_cpu_to_le16(0x0100);
1330 } else {
1331 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1332
1333 /* default 1.28 sec page scan */
1334 acp.interval = __constant_cpu_to_le16(0x0800);
1335 }
1336
1337 acp.window = __constant_cpu_to_le16(0x0012);
1338
1339 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1340 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1341 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1342 sizeof(acp), &acp);
1343
1344 if (hdev->page_scan_type != type)
1345 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1346 }
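/* Page scan timing used above, expressed in 0.625 ms baseband slots:
 * interval 0x0100 = 256 slots = 160 ms (fast, interlaced scan),
 * interval 0x0800 = 2048 slots = 1.28 s (standard default) and
 * window 0x0012 = 18 slots = 11.25 ms in both cases.
 */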
1347
1348 static u8 get_adv_type(struct hci_dev *hdev)
1349 {
1350 struct pending_cmd *cmd;
1351 bool connectable;
1352
1353 /* If there's a pending mgmt command the flag will not yet have
1354 * its final value, so check for this first.
1355 */
1356 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1357 if (cmd) {
1358 struct mgmt_mode *cp = cmd->param;
1359 connectable = !!cp->val;
1360 } else {
1361 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1362 }
1363
1364 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1365 }
1366
1367 static void enable_advertising(struct hci_request *req)
1368 {
1369 struct hci_dev *hdev = req->hdev;
1370 struct hci_cp_le_set_adv_param cp;
1371 u8 enable = 0x01;
1372
1373 memset(&cp, 0, sizeof(cp));
1374 cp.min_interval = __constant_cpu_to_le16(0x0800);
1375 cp.max_interval = __constant_cpu_to_le16(0x0800);
1376 cp.type = get_adv_type(hdev);
1377 cp.own_address_type = hdev->own_addr_type;
1378 cp.channel_map = hdev->le_adv_channel_map;
1379
1380 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1381
1382 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1383 }
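/* The advertising interval programmed above is also in 0.625 ms units:
 * min = max = 0x0800 = 2048 * 0.625 ms = 1.28 s between advertising
 * events.
 */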
1384
1385 static void disable_advertising(struct hci_request *req)
1386 {
1387 u8 enable = 0x00;
1388
1389 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1390 }
1391
1392 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1393 {
1394 struct pending_cmd *cmd;
1395 struct mgmt_mode *cp;
1396 bool changed;
1397
1398 BT_DBG("status 0x%02x", status);
1399
1400 hci_dev_lock(hdev);
1401
1402 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1403 if (!cmd)
1404 goto unlock;
1405
1406 if (status) {
1407 u8 mgmt_err = mgmt_status(status);
1408 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1409 goto remove_cmd;
1410 }
1411
1412 cp = cmd->param;
1413 if (cp->val)
1414 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1415 else
1416 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1417
1418 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1419
1420 if (changed)
1421 new_settings(hdev, cmd->sk);
1422
1423 remove_cmd:
1424 mgmt_pending_remove(cmd);
1425
1426 unlock:
1427 hci_dev_unlock(hdev);
1428 }
1429
1430 static int set_connectable_update_settings(struct hci_dev *hdev,
1431 struct sock *sk, u8 val)
1432 {
1433 bool changed = false;
1434 int err;
1435
1436 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1437 changed = true;
1438
1439 if (val) {
1440 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1441 } else {
1442 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1443 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1444 }
1445
1446 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1447 if (err < 0)
1448 return err;
1449
1450 if (changed)
1451 return new_settings(hdev, sk);
1452
1453 return 0;
1454 }
1455
1456 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1457 u16 len)
1458 {
1459 struct mgmt_mode *cp = data;
1460 struct pending_cmd *cmd;
1461 struct hci_request req;
1462 u8 scan;
1463 int err;
1464
1465 BT_DBG("request for %s", hdev->name);
1466
1467 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1468 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1469 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1470 MGMT_STATUS_REJECTED);
1471
1472 if (cp->val != 0x00 && cp->val != 0x01)
1473 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1474 MGMT_STATUS_INVALID_PARAMS);
1475
1476 hci_dev_lock(hdev);
1477
1478 if (!hdev_is_powered(hdev)) {
1479 err = set_connectable_update_settings(hdev, sk, cp->val);
1480 goto failed;
1481 }
1482
1483 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1484 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1485 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1486 MGMT_STATUS_BUSY);
1487 goto failed;
1488 }
1489
1490 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1491 if (!cmd) {
1492 err = -ENOMEM;
1493 goto failed;
1494 }
1495
1496 hci_req_init(&req, hdev);
1497
1498 /* If BR/EDR is not enabled and we disable advertising as a
1499 * by-product of disabling connectable, we need to update the
1500 * advertising flags.
1501 */
1502 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1503 if (!cp->val) {
1504 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1505 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1506 }
1507 update_adv_data(&req);
1508 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1509 if (cp->val) {
1510 scan = SCAN_PAGE;
1511 } else {
1512 scan = 0;
1513
1514 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1515 hdev->discov_timeout > 0)
1516 cancel_delayed_work(&hdev->discov_off);
1517 }
1518
1519 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1520 }
1521
1522 /* If we're going from non-connectable to connectable or
1523 * vice-versa when fast connectable is enabled, ensure that fast
1524 * connectable gets disabled. write_fast_connectable won't do
1525 * anything if the page scan parameters are already what they
1526 * should be.
1527 */
1528 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1529 write_fast_connectable(&req, false);
1530
1531 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1532 hci_conn_num(hdev, LE_LINK) == 0) {
1533 disable_advertising(&req);
1534 enable_advertising(&req);
1535 }
1536
1537 err = hci_req_run(&req, set_connectable_complete);
1538 if (err < 0) {
1539 mgmt_pending_remove(cmd);
1540 if (err == -ENODATA)
1541 err = set_connectable_update_settings(hdev, sk,
1542 cp->val);
1543 goto failed;
1544 }
1545
1546 failed:
1547 hci_dev_unlock(hdev);
1548 return err;
1549 }
1550
1551 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1552 u16 len)
1553 {
1554 struct mgmt_mode *cp = data;
1555 bool changed;
1556 int err;
1557
1558 BT_DBG("request for %s", hdev->name);
1559
1560 if (cp->val != 0x00 && cp->val != 0x01)
1561 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1562 MGMT_STATUS_INVALID_PARAMS);
1563
1564 hci_dev_lock(hdev);
1565
1566 if (cp->val)
1567 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1568 else
1569 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1570
1571 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1572 if (err < 0)
1573 goto unlock;
1574
1575 if (changed)
1576 err = new_settings(hdev, sk);
1577
1578 unlock:
1579 hci_dev_unlock(hdev);
1580 return err;
1581 }
1582
1583 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1584 u16 len)
1585 {
1586 struct mgmt_mode *cp = data;
1587 struct pending_cmd *cmd;
1588 u8 val, status;
1589 int err;
1590
1591 BT_DBG("request for %s", hdev->name);
1592
1593 status = mgmt_bredr_support(hdev);
1594 if (status)
1595 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1596 status);
1597
1598 if (cp->val != 0x00 && cp->val != 0x01)
1599 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1600 MGMT_STATUS_INVALID_PARAMS);
1601
1602 hci_dev_lock(hdev);
1603
1604 if (!hdev_is_powered(hdev)) {
1605 bool changed = false;
1606
1607 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1608 &hdev->dev_flags)) {
1609 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1610 changed = true;
1611 }
1612
1613 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1614 if (err < 0)
1615 goto failed;
1616
1617 if (changed)
1618 err = new_settings(hdev, sk);
1619
1620 goto failed;
1621 }
1622
1623 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1624 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1625 MGMT_STATUS_BUSY);
1626 goto failed;
1627 }
1628
1629 val = !!cp->val;
1630
1631 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1632 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1633 goto failed;
1634 }
1635
1636 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1637 if (!cmd) {
1638 err = -ENOMEM;
1639 goto failed;
1640 }
1641
1642 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1643 if (err < 0) {
1644 mgmt_pending_remove(cmd);
1645 goto failed;
1646 }
1647
1648 failed:
1649 hci_dev_unlock(hdev);
1650 return err;
1651 }
1652
1653 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1654 {
1655 struct mgmt_mode *cp = data;
1656 struct pending_cmd *cmd;
1657 u8 status;
1658 int err;
1659
1660 BT_DBG("request for %s", hdev->name);
1661
1662 status = mgmt_bredr_support(hdev);
1663 if (status)
1664 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1665
1666 if (!lmp_ssp_capable(hdev))
1667 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1668 MGMT_STATUS_NOT_SUPPORTED);
1669
1670 if (cp->val != 0x00 && cp->val != 0x01)
1671 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1672 MGMT_STATUS_INVALID_PARAMS);
1673
1674 hci_dev_lock(hdev);
1675
1676 if (!hdev_is_powered(hdev)) {
1677 bool changed;
1678
1679 if (cp->val) {
1680 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1681 &hdev->dev_flags);
1682 } else {
1683 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1684 &hdev->dev_flags);
1685 if (!changed)
1686 changed = test_and_clear_bit(HCI_HS_ENABLED,
1687 &hdev->dev_flags);
1688 else
1689 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1690 }
1691
1692 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1693 if (err < 0)
1694 goto failed;
1695
1696 if (changed)
1697 err = new_settings(hdev, sk);
1698
1699 goto failed;
1700 }
1701
1702 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1703 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1704 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1705 MGMT_STATUS_BUSY);
1706 goto failed;
1707 }
1708
1709 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1710 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1711 goto failed;
1712 }
1713
1714 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1715 if (!cmd) {
1716 err = -ENOMEM;
1717 goto failed;
1718 }
1719
1720 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1721 if (err < 0) {
1722 mgmt_pending_remove(cmd);
1723 goto failed;
1724 }
1725
1726 failed:
1727 hci_dev_unlock(hdev);
1728 return err;
1729 }
1730
1731 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1732 {
1733 struct mgmt_mode *cp = data;
1734 bool changed;
1735 u8 status;
1736 int err;
1737
1738 BT_DBG("request for %s", hdev->name);
1739
1740 status = mgmt_bredr_support(hdev);
1741 if (status)
1742 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1743
1744 if (!lmp_ssp_capable(hdev))
1745 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1746 MGMT_STATUS_NOT_SUPPORTED);
1747
1748 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1749 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1750 MGMT_STATUS_REJECTED);
1751
1752 if (cp->val != 0x00 && cp->val != 0x01)
1753 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1754 MGMT_STATUS_INVALID_PARAMS);
1755
1756 hci_dev_lock(hdev);
1757
1758 if (cp->val) {
1759 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1760 } else {
1761 if (hdev_is_powered(hdev)) {
1762 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1763 MGMT_STATUS_REJECTED);
1764 goto unlock;
1765 }
1766
1767 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1768 }
1769
1770 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1771 if (err < 0)
1772 goto unlock;
1773
1774 if (changed)
1775 err = new_settings(hdev, sk);
1776
1777 unlock:
1778 hci_dev_unlock(hdev);
1779 return err;
1780 }
1781
1782 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1783 {
1784 struct cmd_lookup match = { NULL, hdev };
1785
1786 if (status) {
1787 u8 mgmt_err = mgmt_status(status);
1788
1789 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1790 &mgmt_err);
1791 return;
1792 }
1793
1794 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1795
1796 new_settings(hdev, match.sk);
1797
1798 if (match.sk)
1799 sock_put(match.sk);
1800
1801 /* Make sure the controller has a good default for
1802 * advertising data. Restrict the update to when LE
1803 * has actually been enabled. During power on, the
1804 * update in powered_update_hci will take care of it.
1805 */
1806 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1807 struct hci_request req;
1808
1809 hci_dev_lock(hdev);
1810
1811 hci_req_init(&req, hdev);
1812 update_adv_data(&req);
1813 update_scan_rsp_data(&req);
1814 hci_req_run(&req, NULL);
1815
1816 hci_dev_unlock(hdev);
1817 }
1818 }
1819
1820 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1821 {
1822 struct mgmt_mode *cp = data;
1823 struct hci_cp_write_le_host_supported hci_cp;
1824 struct pending_cmd *cmd;
1825 struct hci_request req;
1826 int err;
1827 u8 val, enabled;
1828
1829 BT_DBG("request for %s", hdev->name);
1830
1831 if (!lmp_le_capable(hdev))
1832 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1833 MGMT_STATUS_NOT_SUPPORTED);
1834
1835 if (cp->val != 0x00 && cp->val != 0x01)
1836 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1837 MGMT_STATUS_INVALID_PARAMS);
1838
1839 /* LE-only devices do not allow toggling LE on/off */
1840 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1841 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1842 MGMT_STATUS_REJECTED);
1843
1844 hci_dev_lock(hdev);
1845
1846 val = !!cp->val;
1847 enabled = lmp_host_le_capable(hdev);
1848
1849 if (!hdev_is_powered(hdev) || val == enabled) {
1850 bool changed = false;
1851
1852 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1853 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1854 changed = true;
1855 }
1856
1857 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1858 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1859 changed = true;
1860 }
1861
1862 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1863 if (err < 0)
1864 goto unlock;
1865
1866 if (changed)
1867 err = new_settings(hdev, sk);
1868
1869 goto unlock;
1870 }
1871
1872 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1873 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1874 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1875 MGMT_STATUS_BUSY);
1876 goto unlock;
1877 }
1878
1879 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1880 if (!cmd) {
1881 err = -ENOMEM;
1882 goto unlock;
1883 }
1884
1885 hci_req_init(&req, hdev);
1886
1887 memset(&hci_cp, 0, sizeof(hci_cp));
1888
1889 if (val) {
1890 hci_cp.le = val;
1891 hci_cp.simul = lmp_le_br_capable(hdev);
1892 } else {
1893 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1894 disable_advertising(&req);
1895 }
1896
1897 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1898 &hci_cp);
1899
1900 err = hci_req_run(&req, le_enable_complete);
1901 if (err < 0)
1902 mgmt_pending_remove(cmd);
1903
1904 unlock:
1905 hci_dev_unlock(hdev);
1906 return err;
1907 }
1908
1909 /* This is a helper function to test for pending mgmt commands that can
1910 * cause CoD or EIR HCI commands. We can only allow one such pending
1911 * mgmt command at a time since otherwise we cannot easily track what
1912 * the current values are and will be, and based on that calculate
1913 * whether a new HCI command needs to be sent and, if so, with what value.
1914 */
1915 static bool pending_eir_or_class(struct hci_dev *hdev)
1916 {
1917 struct pending_cmd *cmd;
1918
1919 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1920 switch (cmd->opcode) {
1921 case MGMT_OP_ADD_UUID:
1922 case MGMT_OP_REMOVE_UUID:
1923 case MGMT_OP_SET_DEV_CLASS:
1924 case MGMT_OP_SET_POWERED:
1925 return true;
1926 }
1927 }
1928
1929 return false;
1930 }
1931
1932 static const u8 bluetooth_base_uuid[] = {
1933 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1934 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1935 };
1936
1937 static u8 get_uuid_size(const u8 *uuid)
1938 {
1939 u32 val;
1940
1941 if (memcmp(uuid, bluetooth_base_uuid, 12))
1942 return 128;
1943
1944 val = get_unaligned_le32(&uuid[12]);
1945 if (val > 0xffff)
1946 return 32;
1947
1948 return 16;
1949 }
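/* Classification example for the helper above (UUIDs are stored
 * little-endian, so bytes 0-11 are compared against the Bluetooth base
 * UUID and bytes 12-15 hold the short value): a 16-bit UUID such as
 * 0x1101 is registered as 00001101-0000-1000-8000-00805f9b34fb; its low
 * 12 bytes match bluetooth_base_uuid and the value fits in 16 bits, so
 * get_uuid_size() returns 16. Any UUID outside the base range yields 128.
 */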
1950
1951 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1952 {
1953 struct pending_cmd *cmd;
1954
1955 hci_dev_lock(hdev);
1956
1957 cmd = mgmt_pending_find(mgmt_op, hdev);
1958 if (!cmd)
1959 goto unlock;
1960
1961 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1962 hdev->dev_class, 3);
1963
1964 mgmt_pending_remove(cmd);
1965
1966 unlock:
1967 hci_dev_unlock(hdev);
1968 }
1969
1970 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1971 {
1972 BT_DBG("status 0x%02x", status);
1973
1974 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1975 }
1976
1977 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1978 {
1979 struct mgmt_cp_add_uuid *cp = data;
1980 struct pending_cmd *cmd;
1981 struct hci_request req;
1982 struct bt_uuid *uuid;
1983 int err;
1984
1985 BT_DBG("request for %s", hdev->name);
1986
1987 hci_dev_lock(hdev);
1988
1989 if (pending_eir_or_class(hdev)) {
1990 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1991 MGMT_STATUS_BUSY);
1992 goto failed;
1993 }
1994
1995 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1996 if (!uuid) {
1997 err = -ENOMEM;
1998 goto failed;
1999 }
2000
2001 memcpy(uuid->uuid, cp->uuid, 16);
2002 uuid->svc_hint = cp->svc_hint;
2003 uuid->size = get_uuid_size(cp->uuid);
2004
2005 list_add_tail(&uuid->list, &hdev->uuids);
2006
2007 hci_req_init(&req, hdev);
2008
2009 update_class(&req);
2010 update_eir(&req);
2011
2012 err = hci_req_run(&req, add_uuid_complete);
2013 if (err < 0) {
2014 if (err != -ENODATA)
2015 goto failed;
2016
2017 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2018 hdev->dev_class, 3);
2019 goto failed;
2020 }
2021
2022 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2023 if (!cmd) {
2024 err = -ENOMEM;
2025 goto failed;
2026 }
2027
2028 err = 0;
2029
2030 failed:
2031 hci_dev_unlock(hdev);
2032 return err;
2033 }
2034
2035 static bool enable_service_cache(struct hci_dev *hdev)
2036 {
2037 if (!hdev_is_powered(hdev))
2038 return false;
2039
2040 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2041 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2042 CACHE_TIMEOUT);
2043 return true;
2044 }
2045
2046 return false;
2047 }
2048
2049 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2050 {
2051 BT_DBG("status 0x%02x", status);
2052
2053 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2054 }
2055
2056 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2057 u16 len)
2058 {
2059 struct mgmt_cp_remove_uuid *cp = data;
2060 struct pending_cmd *cmd;
2061 struct bt_uuid *match, *tmp;
2062 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2063 struct hci_request req;
2064 int err, found;
2065
2066 BT_DBG("request for %s", hdev->name);
2067
2068 hci_dev_lock(hdev);
2069
2070 if (pending_eir_or_class(hdev)) {
2071 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2072 MGMT_STATUS_BUSY);
2073 goto unlock;
2074 }
2075
2076 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2077 hci_uuids_clear(hdev);
2078
2079 if (enable_service_cache(hdev)) {
2080 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2081 0, hdev->dev_class, 3);
2082 goto unlock;
2083 }
2084
2085 goto update_class;
2086 }
2087
2088 found = 0;
2089
2090 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2091 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2092 continue;
2093
2094 list_del(&match->list);
2095 kfree(match);
2096 found++;
2097 }
2098
2099 if (found == 0) {
2100 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2101 MGMT_STATUS_INVALID_PARAMS);
2102 goto unlock;
2103 }
2104
2105 update_class:
2106 hci_req_init(&req, hdev);
2107
2108 update_class(&req);
2109 update_eir(&req);
2110
2111 err = hci_req_run(&req, remove_uuid_complete);
2112 if (err < 0) {
2113 if (err != -ENODATA)
2114 goto unlock;
2115
2116 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2117 hdev->dev_class, 3);
2118 goto unlock;
2119 }
2120
2121 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2122 if (!cmd) {
2123 err = -ENOMEM;
2124 goto unlock;
2125 }
2126
2127 err = 0;
2128
2129 unlock:
2130 hci_dev_unlock(hdev);
2131 return err;
2132 }
2133
2134 static void set_class_complete(struct hci_dev *hdev, u8 status)
2135 {
2136 BT_DBG("status 0x%02x", status);
2137
2138 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2139 }
2140
2141 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2142 u16 len)
2143 {
2144 struct mgmt_cp_set_dev_class *cp = data;
2145 struct pending_cmd *cmd;
2146 struct hci_request req;
2147 int err;
2148
2149 BT_DBG("request for %s", hdev->name);
2150
2151 if (!lmp_bredr_capable(hdev))
2152 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2153 MGMT_STATUS_NOT_SUPPORTED);
2154
2155 hci_dev_lock(hdev);
2156
2157 if (pending_eir_or_class(hdev)) {
2158 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2159 MGMT_STATUS_BUSY);
2160 goto unlock;
2161 }
2162
2163 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2164 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2165 MGMT_STATUS_INVALID_PARAMS);
2166 goto unlock;
2167 }
2168
2169 hdev->major_class = cp->major;
2170 hdev->minor_class = cp->minor;
2171
2172 if (!hdev_is_powered(hdev)) {
2173 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2174 hdev->dev_class, 3);
2175 goto unlock;
2176 }
2177
2178 hci_req_init(&req, hdev);
2179
2180 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2181 hci_dev_unlock(hdev);
2182 cancel_delayed_work_sync(&hdev->service_cache);
2183 hci_dev_lock(hdev);
2184 update_eir(&req);
2185 }
2186
2187 update_class(&req);
2188
2189 err = hci_req_run(&req, set_class_complete);
2190 if (err < 0) {
2191 if (err != -ENODATA)
2192 goto unlock;
2193
2194 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2195 hdev->dev_class, 3);
2196 goto unlock;
2197 }
2198
2199 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2200 if (!cmd) {
2201 err = -ENOMEM;
2202 goto unlock;
2203 }
2204
2205 err = 0;
2206
2207 unlock:
2208 hci_dev_unlock(hdev);
2209 return err;
2210 }
2211
2212 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2213 u16 len)
2214 {
2215 struct mgmt_cp_load_link_keys *cp = data;
2216 u16 key_count, expected_len;
2217 bool changed;
2218 int i;
2219
2220 BT_DBG("request for %s", hdev->name);
2221
2222 if (!lmp_bredr_capable(hdev))
2223 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2224 MGMT_STATUS_NOT_SUPPORTED);
2225
2226 key_count = __le16_to_cpu(cp->key_count);
2227
2228 expected_len = sizeof(*cp) + key_count *
2229 sizeof(struct mgmt_link_key_info);
2230 if (expected_len != len) {
2231 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2232 expected_len, len);
2233 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2234 MGMT_STATUS_INVALID_PARAMS);
2235 }
2236
2237 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2238 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2239 MGMT_STATUS_INVALID_PARAMS);
2240
2241 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2242 key_count);
2243
2244 for (i = 0; i < key_count; i++) {
2245 struct mgmt_link_key_info *key = &cp->keys[i];
2246
2247 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2248 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2249 MGMT_STATUS_INVALID_PARAMS);
2250 }
2251
2252 hci_dev_lock(hdev);
2253
2254 hci_link_keys_clear(hdev);
2255
2256 if (cp->debug_keys)
2257 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2258 else
2259 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2260
2261 if (changed)
2262 new_settings(hdev, NULL);
2263
2264 for (i = 0; i < key_count; i++) {
2265 struct mgmt_link_key_info *key = &cp->keys[i];
2266
2267 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2268 key->type, key->pin_len);
2269 }
2270
2271 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2272
2273 hci_dev_unlock(hdev);
2274
2275 return 0;
2276 }
2277
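/* Send the Device Unpaired event to all management sockets except
 * skip_sk.
 */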
2278 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2279 u8 addr_type, struct sock *skip_sk)
2280 {
2281 struct mgmt_ev_device_unpaired ev;
2282
2283 bacpy(&ev.addr.bdaddr, bdaddr);
2284 ev.addr.type = addr_type;
2285
2286 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2287 skip_sk);
2288 }
2289
2290 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2291 u16 len)
2292 {
2293 struct mgmt_cp_unpair_device *cp = data;
2294 struct mgmt_rp_unpair_device rp;
2295 struct hci_cp_disconnect dc;
2296 struct pending_cmd *cmd;
2297 struct hci_conn *conn;
2298 int err;
2299
2300 memset(&rp, 0, sizeof(rp));
2301 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2302 rp.addr.type = cp->addr.type;
2303
2304 if (!bdaddr_type_is_valid(cp->addr.type))
2305 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2306 MGMT_STATUS_INVALID_PARAMS,
2307 &rp, sizeof(rp));
2308
2309 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2310 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2311 MGMT_STATUS_INVALID_PARAMS,
2312 &rp, sizeof(rp));
2313
2314 hci_dev_lock(hdev);
2315
2316 if (!hdev_is_powered(hdev)) {
2317 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2318 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2319 goto unlock;
2320 }
2321
2322 if (cp->addr.type == BDADDR_BREDR) {
2323 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2324 } else {
2325 u8 addr_type;
2326
2327 if (cp->addr.type == BDADDR_LE_PUBLIC)
2328 addr_type = ADDR_LE_DEV_PUBLIC;
2329 else
2330 addr_type = ADDR_LE_DEV_RANDOM;
2331
2332 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2333
2334 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2335 }
2336
2337 if (err < 0) {
2338 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2339 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2340 goto unlock;
2341 }
2342
2343 if (cp->disconnect) {
2344 if (cp->addr.type == BDADDR_BREDR)
2345 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2346 &cp->addr.bdaddr);
2347 else
2348 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2349 &cp->addr.bdaddr);
2350 } else {
2351 conn = NULL;
2352 }
2353
2354 if (!conn) {
2355 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2356 &rp, sizeof(rp));
2357 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2358 goto unlock;
2359 }
2360
2361 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2362 sizeof(*cp));
2363 if (!cmd) {
2364 err = -ENOMEM;
2365 goto unlock;
2366 }
2367
2368 dc.handle = cpu_to_le16(conn->handle);
2369 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2370 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2371 if (err < 0)
2372 mgmt_pending_remove(cmd);
2373
2374 unlock:
2375 hci_dev_unlock(hdev);
2376 return err;
2377 }
2378
2379 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2380 u16 len)
2381 {
2382 struct mgmt_cp_disconnect *cp = data;
2383 struct mgmt_rp_disconnect rp;
2384 struct hci_cp_disconnect dc;
2385 struct pending_cmd *cmd;
2386 struct hci_conn *conn;
2387 int err;
2388
2389 BT_DBG("");
2390
2391 memset(&rp, 0, sizeof(rp));
2392 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2393 rp.addr.type = cp->addr.type;
2394
2395 if (!bdaddr_type_is_valid(cp->addr.type))
2396 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2397 MGMT_STATUS_INVALID_PARAMS,
2398 &rp, sizeof(rp));
2399
2400 hci_dev_lock(hdev);
2401
2402 if (!test_bit(HCI_UP, &hdev->flags)) {
2403 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2404 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2405 goto failed;
2406 }
2407
2408 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2409 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2410 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2411 goto failed;
2412 }
2413
2414 if (cp->addr.type == BDADDR_BREDR)
2415 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2416 &cp->addr.bdaddr);
2417 else
2418 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2419
2420 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2421 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2422 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2423 goto failed;
2424 }
2425
2426 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2427 if (!cmd) {
2428 err = -ENOMEM;
2429 goto failed;
2430 }
2431
2432 dc.handle = cpu_to_le16(conn->handle);
2433 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2434
2435 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2436 if (err < 0)
2437 mgmt_pending_remove(cmd);
2438
2439 failed:
2440 hci_dev_unlock(hdev);
2441 return err;
2442 }
2443
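/* Map an HCI link type and LE address type to the corresponding mgmt
 * BDADDR_* value. Unknown LE address types fall back to LE Random and
 * anything that is not an LE link falls back to BR/EDR.
 */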
2444 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2445 {
2446 switch (link_type) {
2447 case LE_LINK:
2448 switch (addr_type) {
2449 case ADDR_LE_DEV_PUBLIC:
2450 return BDADDR_LE_PUBLIC;
2451
2452 default:
2453 /* Fallback to LE Random address type */
2454 return BDADDR_LE_RANDOM;
2455 }
2456
2457 default:
2458 /* Fallback to BR/EDR type */
2459 return BDADDR_BREDR;
2460 }
2461 }
2462
2463 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2464 u16 data_len)
2465 {
2466 struct mgmt_rp_get_connections *rp;
2467 struct hci_conn *c;
2468 size_t rp_len;
2469 int err;
2470 u16 i;
2471
2472 BT_DBG("");
2473
2474 hci_dev_lock(hdev);
2475
2476 if (!hdev_is_powered(hdev)) {
2477 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2478 MGMT_STATUS_NOT_POWERED);
2479 goto unlock;
2480 }
2481
2482 i = 0;
2483 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2484 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2485 i++;
2486 }
2487
2488 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2489 rp = kmalloc(rp_len, GFP_KERNEL);
2490 if (!rp) {
2491 err = -ENOMEM;
2492 goto unlock;
2493 }
2494
2495 i = 0;
2496 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2497 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2498 continue;
2499 bacpy(&rp->addr[i].bdaddr, &c->dst);
2500 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2501 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2502 continue;
2503 i++;
2504 }
2505
2506 rp->conn_count = cpu_to_le16(i);
2507
2508 /* Recalculate length in case of filtered SCO connections, etc */
2509 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2510
2511 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2512 rp_len);
2513
2514 kfree(rp);
2515
2516 unlock:
2517 hci_dev_unlock(hdev);
2518 return err;
2519 }
2520
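/* Queue a pending PIN Code Neg Reply command and send the matching HCI
 * negative reply. The pending entry is removed again if the HCI command
 * cannot be sent.
 */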
2521 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2522 struct mgmt_cp_pin_code_neg_reply *cp)
2523 {
2524 struct pending_cmd *cmd;
2525 int err;
2526
2527 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2528 sizeof(*cp));
2529 if (!cmd)
2530 return -ENOMEM;
2531
2532 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2533 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2534 if (err < 0)
2535 mgmt_pending_remove(cmd);
2536
2537 return err;
2538 }
2539
2540 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2541 u16 len)
2542 {
2543 struct hci_conn *conn;
2544 struct mgmt_cp_pin_code_reply *cp = data;
2545 struct hci_cp_pin_code_reply reply;
2546 struct pending_cmd *cmd;
2547 int err;
2548
2549 BT_DBG("");
2550
2551 hci_dev_lock(hdev);
2552
2553 if (!hdev_is_powered(hdev)) {
2554 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2555 MGMT_STATUS_NOT_POWERED);
2556 goto failed;
2557 }
2558
2559 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2560 if (!conn) {
2561 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2562 MGMT_STATUS_NOT_CONNECTED);
2563 goto failed;
2564 }
2565
2566 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2567 struct mgmt_cp_pin_code_neg_reply ncp;
2568
2569 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2570
2571 BT_ERR("PIN code is not 16 bytes long");
2572
2573 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2574 if (err >= 0)
2575 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2576 MGMT_STATUS_INVALID_PARAMS);
2577
2578 goto failed;
2579 }
2580
2581 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2582 if (!cmd) {
2583 err = -ENOMEM;
2584 goto failed;
2585 }
2586
2587 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2588 reply.pin_len = cp->pin_len;
2589 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2590
2591 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2592 if (err < 0)
2593 mgmt_pending_remove(cmd);
2594
2595 failed:
2596 hci_dev_unlock(hdev);
2597 return err;
2598 }
2599
2600 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2601 u16 len)
2602 {
2603 struct mgmt_cp_set_io_capability *cp = data;
2604
2605 BT_DBG("");
2606
2607 hci_dev_lock(hdev);
2608
2609 hdev->io_capability = cp->io_capability;
2610
2611 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2612 hdev->io_capability);
2613
2614 hci_dev_unlock(hdev);
2615
2616 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2617 0);
2618 }
2619
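/* Look up the pending Pair Device command whose user_data points at
 * this connection, if any.
 */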
2620 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2621 {
2622 struct hci_dev *hdev = conn->hdev;
2623 struct pending_cmd *cmd;
2624
2625 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2626 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2627 continue;
2628
2629 if (cmd->user_data != conn)
2630 continue;
2631
2632 return cmd;
2633 }
2634
2635 return NULL;
2636 }
2637
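/* Finish a Pair Device request: send the command response, detach the
 * connection callbacks so no further confirmations are delivered, drop
 * the connection reference and remove the pending command.
 */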
2638 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2639 {
2640 struct mgmt_rp_pair_device rp;
2641 struct hci_conn *conn = cmd->user_data;
2642
2643 bacpy(&rp.addr.bdaddr, &conn->dst);
2644 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2645
2646 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2647 &rp, sizeof(rp));
2648
2649 /* So we don't get further callbacks for this connection */
2650 conn->connect_cfm_cb = NULL;
2651 conn->security_cfm_cb = NULL;
2652 conn->disconn_cfm_cb = NULL;
2653
2654 hci_conn_drop(conn);
2655
2656 mgmt_pending_remove(cmd);
2657 }
2658
2659 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2660 {
2661 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2662 struct pending_cmd *cmd;
2663
2664 cmd = find_pairing(conn);
2665 if (cmd)
2666 pairing_complete(cmd, status);
2667 }
2668
2669 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2670 {
2671 struct pending_cmd *cmd;
2672
2673 BT_DBG("status %u", status);
2674
2675 cmd = find_pairing(conn);
2676 if (!cmd)
2677 BT_DBG("Unable to find a pending command");
2678 else
2679 pairing_complete(cmd, mgmt_status(status));
2680 }
2681
2682 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2683 {
2684 struct pending_cmd *cmd;
2685
2686 BT_DBG("status %u", status);
2687
2688 if (!status)
2689 return;
2690
2691 cmd = find_pairing(conn);
2692 if (!cmd)
2693 BT_DBG("Unable to find a pending command");
2694 else
2695 pairing_complete(cmd, mgmt_status(status));
2696 }
2697
2698 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2699 u16 len)
2700 {
2701 struct mgmt_cp_pair_device *cp = data;
2702 struct mgmt_rp_pair_device rp;
2703 struct pending_cmd *cmd;
2704 u8 sec_level, auth_type;
2705 struct hci_conn *conn;
2706 int err;
2707
2708 BT_DBG("");
2709
2710 memset(&rp, 0, sizeof(rp));
2711 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2712 rp.addr.type = cp->addr.type;
2713
2714 if (!bdaddr_type_is_valid(cp->addr.type))
2715 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2716 MGMT_STATUS_INVALID_PARAMS,
2717 &rp, sizeof(rp));
2718
2719 hci_dev_lock(hdev);
2720
2721 if (!hdev_is_powered(hdev)) {
2722 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2723 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2724 goto unlock;
2725 }
2726
2727 sec_level = BT_SECURITY_MEDIUM;
2728 if (cp->io_cap == 0x03)
2729 auth_type = HCI_AT_DEDICATED_BONDING;
2730 else
2731 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2732
2733 if (cp->addr.type == BDADDR_BREDR)
2734 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2735 cp->addr.type, sec_level, auth_type);
2736 else
2737 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2738 cp->addr.type, sec_level, auth_type);
2739
2740 if (IS_ERR(conn)) {
2741 int status;
2742
2743 if (PTR_ERR(conn) == -EBUSY)
2744 status = MGMT_STATUS_BUSY;
2745 else
2746 status = MGMT_STATUS_CONNECT_FAILED;
2747
2748 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2749 status, &rp,
2750 sizeof(rp));
2751 goto unlock;
2752 }
2753
2754 if (conn->connect_cfm_cb) {
2755 hci_conn_drop(conn);
2756 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2757 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2758 goto unlock;
2759 }
2760
2761 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2762 if (!cmd) {
2763 err = -ENOMEM;
2764 hci_conn_drop(conn);
2765 goto unlock;
2766 }
2767
2768 /* For LE, just connecting isn't proof that the pairing finished */
2769 if (cp->addr.type == BDADDR_BREDR) {
2770 conn->connect_cfm_cb = pairing_complete_cb;
2771 conn->security_cfm_cb = pairing_complete_cb;
2772 conn->disconn_cfm_cb = pairing_complete_cb;
2773 } else {
2774 conn->connect_cfm_cb = le_pairing_complete_cb;
2775 conn->security_cfm_cb = le_pairing_complete_cb;
2776 conn->disconn_cfm_cb = le_pairing_complete_cb;
2777 }
2778
2779 conn->io_capability = cp->io_cap;
2780 cmd->user_data = conn;
2781
2782 if (conn->state == BT_CONNECTED &&
2783 hci_conn_security(conn, sec_level, auth_type))
2784 pairing_complete(cmd, 0);
2785
2786 err = 0;
2787
2788 unlock:
2789 hci_dev_unlock(hdev);
2790 return err;
2791 }
2792
2793 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2794 u16 len)
2795 {
2796 struct mgmt_addr_info *addr = data;
2797 struct pending_cmd *cmd;
2798 struct hci_conn *conn;
2799 int err;
2800
2801 BT_DBG("");
2802
2803 hci_dev_lock(hdev);
2804
2805 if (!hdev_is_powered(hdev)) {
2806 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2807 MGMT_STATUS_NOT_POWERED);
2808 goto unlock;
2809 }
2810
2811 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2812 if (!cmd) {
2813 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2814 MGMT_STATUS_INVALID_PARAMS);
2815 goto unlock;
2816 }
2817
2818 conn = cmd->user_data;
2819
2820 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2821 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2822 MGMT_STATUS_INVALID_PARAMS);
2823 goto unlock;
2824 }
2825
2826 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2827
2828 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2829 addr, sizeof(*addr));
2830 unlock:
2831 hci_dev_unlock(hdev);
2832 return err;
2833 }
2834
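/* Common handler for the User Confirmation and User Passkey (negative)
 * reply commands. LE addresses are handed to SMP directly, while BR/EDR
 * replies are forwarded to the controller using the given HCI opcode
 * and tracked as a pending command; the entry is removed if sending
 * fails.
 */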
2835 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2836 struct mgmt_addr_info *addr, u16 mgmt_op,
2837 u16 hci_op, __le32 passkey)
2838 {
2839 struct pending_cmd *cmd;
2840 struct hci_conn *conn;
2841 int err;
2842
2843 hci_dev_lock(hdev);
2844
2845 if (!hdev_is_powered(hdev)) {
2846 err = cmd_complete(sk, hdev->id, mgmt_op,
2847 MGMT_STATUS_NOT_POWERED, addr,
2848 sizeof(*addr));
2849 goto done;
2850 }
2851
2852 if (addr->type == BDADDR_BREDR)
2853 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2854 else
2855 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2856
2857 if (!conn) {
2858 err = cmd_complete(sk, hdev->id, mgmt_op,
2859 MGMT_STATUS_NOT_CONNECTED, addr,
2860 sizeof(*addr));
2861 goto done;
2862 }
2863
2864 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2865 /* Continue with pairing via SMP */
2866 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2867
2868 if (!err)
2869 err = cmd_complete(sk, hdev->id, mgmt_op,
2870 MGMT_STATUS_SUCCESS, addr,
2871 sizeof(*addr));
2872 else
2873 err = cmd_complete(sk, hdev->id, mgmt_op,
2874 MGMT_STATUS_FAILED, addr,
2875 sizeof(*addr));
2876
2877 goto done;
2878 }
2879
2880 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2881 if (!cmd) {
2882 err = -ENOMEM;
2883 goto done;
2884 }
2885
2886 /* Continue with pairing via HCI */
2887 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2888 struct hci_cp_user_passkey_reply cp;
2889
2890 bacpy(&cp.bdaddr, &addr->bdaddr);
2891 cp.passkey = passkey;
2892 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2893 } else
2894 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2895 &addr->bdaddr);
2896
2897 if (err < 0)
2898 mgmt_pending_remove(cmd);
2899
2900 done:
2901 hci_dev_unlock(hdev);
2902 return err;
2903 }
2904
2905 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2906 void *data, u16 len)
2907 {
2908 struct mgmt_cp_pin_code_neg_reply *cp = data;
2909
2910 BT_DBG("");
2911
2912 return user_pairing_resp(sk, hdev, &cp->addr,
2913 MGMT_OP_PIN_CODE_NEG_REPLY,
2914 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2915 }
2916
2917 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2918 u16 len)
2919 {
2920 struct mgmt_cp_user_confirm_reply *cp = data;
2921
2922 BT_DBG("");
2923
2924 if (len != sizeof(*cp))
2925 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2926 MGMT_STATUS_INVALID_PARAMS);
2927
2928 return user_pairing_resp(sk, hdev, &cp->addr,
2929 MGMT_OP_USER_CONFIRM_REPLY,
2930 HCI_OP_USER_CONFIRM_REPLY, 0);
2931 }
2932
2933 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2934 void *data, u16 len)
2935 {
2936 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2937
2938 BT_DBG("");
2939
2940 return user_pairing_resp(sk, hdev, &cp->addr,
2941 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2942 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2943 }
2944
2945 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2946 u16 len)
2947 {
2948 struct mgmt_cp_user_passkey_reply *cp = data;
2949
2950 BT_DBG("");
2951
2952 return user_pairing_resp(sk, hdev, &cp->addr,
2953 MGMT_OP_USER_PASSKEY_REPLY,
2954 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2955 }
2956
2957 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2958 void *data, u16 len)
2959 {
2960 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2961
2962 BT_DBG("");
2963
2964 return user_pairing_resp(sk, hdev, &cp->addr,
2965 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2966 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2967 }
2968
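/* Queue an HCI Write Local Name command carrying the current dev_name. */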
2969 static void update_name(struct hci_request *req)
2970 {
2971 struct hci_dev *hdev = req->hdev;
2972 struct hci_cp_write_local_name cp;
2973
2974 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2975
2976 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2977 }
2978
2979 static void set_name_complete(struct hci_dev *hdev, u8 status)
2980 {
2981 struct mgmt_cp_set_local_name *cp;
2982 struct pending_cmd *cmd;
2983
2984 BT_DBG("status 0x%02x", status);
2985
2986 hci_dev_lock(hdev);
2987
2988 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2989 if (!cmd)
2990 goto unlock;
2991
2992 cp = cmd->param;
2993
2994 if (status)
2995 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2996 mgmt_status(status));
2997 else
2998 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2999 cp, sizeof(*cp));
3000
3001 mgmt_pending_remove(cmd);
3002
3003 unlock:
3004 hci_dev_unlock(hdev);
3005 }
3006
3007 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3008 u16 len)
3009 {
3010 struct mgmt_cp_set_local_name *cp = data;
3011 struct pending_cmd *cmd;
3012 struct hci_request req;
3013 int err;
3014
3015 BT_DBG("");
3016
3017 hci_dev_lock(hdev);
3018
3019 /* If the old values are the same as the new ones, just return a
3020 * direct command complete event.
3021 */
3022 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3023 !memcmp(hdev->short_name, cp->short_name,
3024 sizeof(hdev->short_name))) {
3025 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3026 data, len);
3027 goto failed;
3028 }
3029
3030 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3031
3032 if (!hdev_is_powered(hdev)) {
3033 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3034
3035 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3036 data, len);
3037 if (err < 0)
3038 goto failed;
3039
3040 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3041 sk);
3042
3043 goto failed;
3044 }
3045
3046 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3047 if (!cmd) {
3048 err = -ENOMEM;
3049 goto failed;
3050 }
3051
3052 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3053
3054 hci_req_init(&req, hdev);
3055
3056 if (lmp_bredr_capable(hdev)) {
3057 update_name(&req);
3058 update_eir(&req);
3059 }
3060
3061 /* The name is stored in the scan response data and so
3062 * no need to update the advertising data here.
3063 */
3064 if (lmp_le_capable(hdev))
3065 update_scan_rsp_data(&req);
3066
3067 err = hci_req_run(&req, set_name_complete);
3068 if (err < 0)
3069 mgmt_pending_remove(cmd);
3070
3071 failed:
3072 hci_dev_unlock(hdev);
3073 return err;
3074 }
3075
3076 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3077 void *data, u16 data_len)
3078 {
3079 struct pending_cmd *cmd;
3080 int err;
3081
3082 BT_DBG("%s", hdev->name);
3083
3084 hci_dev_lock(hdev);
3085
3086 if (!hdev_is_powered(hdev)) {
3087 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3088 MGMT_STATUS_NOT_POWERED);
3089 goto unlock;
3090 }
3091
3092 if (!lmp_ssp_capable(hdev)) {
3093 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3094 MGMT_STATUS_NOT_SUPPORTED);
3095 goto unlock;
3096 }
3097
3098 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3099 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3100 MGMT_STATUS_BUSY);
3101 goto unlock;
3102 }
3103
3104 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3105 if (!cmd) {
3106 err = -ENOMEM;
3107 goto unlock;
3108 }
3109
3110 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3111 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3112 0, NULL);
3113 else
3114 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3115
3116 if (err < 0)
3117 mgmt_pending_remove(cmd);
3118
3119 unlock:
3120 hci_dev_unlock(hdev);
3121 return err;
3122 }
3123
3124 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3125 void *data, u16 len)
3126 {
3127 int err;
3128
3129 BT_DBG("%s", hdev->name);
3130
3131 hci_dev_lock(hdev);
3132
3133 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3134 struct mgmt_cp_add_remote_oob_data *cp = data;
3135 u8 status;
3136
3137 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3138 cp->hash, cp->randomizer);
3139 if (err < 0)
3140 status = MGMT_STATUS_FAILED;
3141 else
3142 status = MGMT_STATUS_SUCCESS;
3143
3144 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3145 status, &cp->addr, sizeof(cp->addr));
3146 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3147 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3148 u8 status;
3149
3150 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3151 cp->hash192,
3152 cp->randomizer192,
3153 cp->hash256,
3154 cp->randomizer256);
3155 if (err < 0)
3156 status = MGMT_STATUS_FAILED;
3157 else
3158 status = MGMT_STATUS_SUCCESS;
3159
3160 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3161 status, &cp->addr, sizeof(cp->addr));
3162 } else {
3163 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3164 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3165 MGMT_STATUS_INVALID_PARAMS);
3166 }
3167
3168 hci_dev_unlock(hdev);
3169 return err;
3170 }
3171
3172 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3173 void *data, u16 len)
3174 {
3175 struct mgmt_cp_remove_remote_oob_data *cp = data;
3176 u8 status;
3177 int err;
3178
3179 BT_DBG("%s", hdev->name);
3180
3181 hci_dev_lock(hdev);
3182
3183 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3184 if (err < 0)
3185 status = MGMT_STATUS_INVALID_PARAMS;
3186 else
3187 status = MGMT_STATUS_SUCCESS;
3188
3189 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3190 status, &cp->addr, sizeof(cp->addr));
3191
3192 hci_dev_unlock(hdev);
3193 return err;
3194 }
3195
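/* Reset the discovery state and, if a Start Discovery command is still
 * pending, complete it with the translated failure status and the
 * discovery type that was requested.
 */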
3196 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3197 {
3198 struct pending_cmd *cmd;
3199 u8 type;
3200 int err;
3201
3202 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3203
3204 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3205 if (!cmd)
3206 return -ENOENT;
3207
3208 type = hdev->discovery.type;
3209
3210 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3211 &type, sizeof(type));
3212 mgmt_pending_remove(cmd);
3213
3214 return err;
3215 }
3216
3217 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3218 {
3219 BT_DBG("status %d", status);
3220
3221 if (status) {
3222 hci_dev_lock(hdev);
3223 mgmt_start_discovery_failed(hdev, status);
3224 hci_dev_unlock(hdev);
3225 return;
3226 }
3227
3228 hci_dev_lock(hdev);
3229 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3230 hci_dev_unlock(hdev);
3231
3232 switch (hdev->discovery.type) {
3233 case DISCOV_TYPE_LE:
3234 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3235 DISCOV_LE_TIMEOUT);
3236 break;
3237
3238 case DISCOV_TYPE_INTERLEAVED:
3239 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3240 DISCOV_INTERLEAVED_TIMEOUT);
3241 break;
3242
3243 case DISCOV_TYPE_BREDR:
3244 break;
3245
3246 default:
3247 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3248 }
3249 }
3250
3251 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3252 void *data, u16 len)
3253 {
3254 struct mgmt_cp_start_discovery *cp = data;
3255 struct pending_cmd *cmd;
3256 struct hci_cp_le_set_scan_param param_cp;
3257 struct hci_cp_le_set_scan_enable enable_cp;
3258 struct hci_cp_inquiry inq_cp;
3259 struct hci_request req;
3260 /* General inquiry access code (GIAC) */
3261 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3262 u8 status;
3263 int err;
3264
3265 BT_DBG("%s", hdev->name);
3266
3267 hci_dev_lock(hdev);
3268
3269 if (!hdev_is_powered(hdev)) {
3270 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3271 MGMT_STATUS_NOT_POWERED);
3272 goto failed;
3273 }
3274
3275 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3276 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3277 MGMT_STATUS_BUSY);
3278 goto failed;
3279 }
3280
3281 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3282 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3283 MGMT_STATUS_BUSY);
3284 goto failed;
3285 }
3286
3287 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3288 if (!cmd) {
3289 err = -ENOMEM;
3290 goto failed;
3291 }
3292
3293 hdev->discovery.type = cp->type;
3294
3295 hci_req_init(&req, hdev);
3296
3297 switch (hdev->discovery.type) {
3298 case DISCOV_TYPE_BREDR:
3299 status = mgmt_bredr_support(hdev);
3300 if (status) {
3301 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3302 status);
3303 mgmt_pending_remove(cmd);
3304 goto failed;
3305 }
3306
3307 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3308 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3309 MGMT_STATUS_BUSY);
3310 mgmt_pending_remove(cmd);
3311 goto failed;
3312 }
3313
3314 hci_inquiry_cache_flush(hdev);
3315
3316 memset(&inq_cp, 0, sizeof(inq_cp));
3317 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3318 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3319 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3320 break;
3321
3322 case DISCOV_TYPE_LE:
3323 case DISCOV_TYPE_INTERLEAVED:
3324 status = mgmt_le_support(hdev);
3325 if (status) {
3326 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3327 status);
3328 mgmt_pending_remove(cmd);
3329 goto failed;
3330 }
3331
3332 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3333 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3334 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3335 MGMT_STATUS_NOT_SUPPORTED);
3336 mgmt_pending_remove(cmd);
3337 goto failed;
3338 }
3339
3340 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3341 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3342 MGMT_STATUS_REJECTED);
3343 mgmt_pending_remove(cmd);
3344 goto failed;
3345 }
3346
3347 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
3348 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3349 MGMT_STATUS_BUSY);
3350 mgmt_pending_remove(cmd);
3351 goto failed;
3352 }
3353
3354 memset(&param_cp, 0, sizeof(param_cp));
3355 param_cp.type = LE_SCAN_ACTIVE;
3356 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3357 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3358 param_cp.own_address_type = hdev->own_addr_type;
3359 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3360 &param_cp);
3361
3362 memset(&enable_cp, 0, sizeof(enable_cp));
3363 enable_cp.enable = LE_SCAN_ENABLE;
3364 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3365 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3366 &enable_cp);
3367 break;
3368
3369 default:
3370 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3371 MGMT_STATUS_INVALID_PARAMS);
3372 mgmt_pending_remove(cmd);
3373 goto failed;
3374 }
3375
3376 err = hci_req_run(&req, start_discovery_complete);
3377 if (err < 0)
3378 mgmt_pending_remove(cmd);
3379 else
3380 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3381
3382 failed:
3383 hci_dev_unlock(hdev);
3384 return err;
3385 }
3386
3387 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3388 {
3389 struct pending_cmd *cmd;
3390 int err;
3391
3392 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3393 if (!cmd)
3394 return -ENOENT;
3395
3396 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3397 &hdev->discovery.type, sizeof(hdev->discovery.type));
3398 mgmt_pending_remove(cmd);
3399
3400 return err;
3401 }
3402
3403 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3404 {
3405 BT_DBG("status %d", status);
3406
3407 hci_dev_lock(hdev);
3408
3409 if (status) {
3410 mgmt_stop_discovery_failed(hdev, status);
3411 goto unlock;
3412 }
3413
3414 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3415
3416 unlock:
3417 hci_dev_unlock(hdev);
3418 }
3419
3420 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3421 u16 len)
3422 {
3423 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3424 struct pending_cmd *cmd;
3425 struct hci_cp_remote_name_req_cancel cp;
3426 struct inquiry_entry *e;
3427 struct hci_request req;
3428 struct hci_cp_le_set_scan_enable enable_cp;
3429 int err;
3430
3431 BT_DBG("%s", hdev->name);
3432
3433 hci_dev_lock(hdev);
3434
3435 if (!hci_discovery_active(hdev)) {
3436 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3437 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3438 sizeof(mgmt_cp->type));
3439 goto unlock;
3440 }
3441
3442 if (hdev->discovery.type != mgmt_cp->type) {
3443 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3444 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3445 sizeof(mgmt_cp->type));
3446 goto unlock;
3447 }
3448
3449 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3450 if (!cmd) {
3451 err = -ENOMEM;
3452 goto unlock;
3453 }
3454
3455 hci_req_init(&req, hdev);
3456
3457 switch (hdev->discovery.state) {
3458 case DISCOVERY_FINDING:
3459 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3460 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3461 } else {
3462 cancel_delayed_work(&hdev->le_scan_disable);
3463
3464 memset(&enable_cp, 0, sizeof(enable_cp));
3465 enable_cp.enable = LE_SCAN_DISABLE;
3466 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
3467 sizeof(enable_cp), &enable_cp);
3468 }
3469
3470 break;
3471
3472 case DISCOVERY_RESOLVING:
3473 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3474 NAME_PENDING);
3475 if (!e) {
3476 mgmt_pending_remove(cmd);
3477 err = cmd_complete(sk, hdev->id,
3478 MGMT_OP_STOP_DISCOVERY, 0,
3479 &mgmt_cp->type,
3480 sizeof(mgmt_cp->type));
3481 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3482 goto unlock;
3483 }
3484
3485 bacpy(&cp.bdaddr, &e->data.bdaddr);
3486 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3487 &cp);
3488
3489 break;
3490
3491 default:
3492 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3493
3494 mgmt_pending_remove(cmd);
3495 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3496 MGMT_STATUS_FAILED, &mgmt_cp->type,
3497 sizeof(mgmt_cp->type));
3498 goto unlock;
3499 }
3500
3501 err = hci_req_run(&req, stop_discovery_complete);
3502 if (err < 0)
3503 mgmt_pending_remove(cmd);
3504 else
3505 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3506
3507 unlock:
3508 hci_dev_unlock(hdev);
3509 return err;
3510 }
3511
3512 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3513 u16 len)
3514 {
3515 struct mgmt_cp_confirm_name *cp = data;
3516 struct inquiry_entry *e;
3517 int err;
3518
3519 BT_DBG("%s", hdev->name);
3520
3521 hci_dev_lock(hdev);
3522
3523 if (!hci_discovery_active(hdev)) {
3524 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3525 MGMT_STATUS_FAILED);
3526 goto failed;
3527 }
3528
3529 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3530 if (!e) {
3531 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3532 MGMT_STATUS_INVALID_PARAMS);
3533 goto failed;
3534 }
3535
3536 if (cp->name_known) {
3537 e->name_state = NAME_KNOWN;
3538 list_del(&e->list);
3539 } else {
3540 e->name_state = NAME_NEEDED;
3541 hci_inquiry_cache_update_resolve(hdev, e);
3542 }
3543
3544 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3545 sizeof(cp->addr));
3546
3547 failed:
3548 hci_dev_unlock(hdev);
3549 return err;
3550 }
3551
3552 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3553 u16 len)
3554 {
3555 struct mgmt_cp_block_device *cp = data;
3556 u8 status;
3557 int err;
3558
3559 BT_DBG("%s", hdev->name);
3560
3561 if (!bdaddr_type_is_valid(cp->addr.type))
3562 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3563 MGMT_STATUS_INVALID_PARAMS,
3564 &cp->addr, sizeof(cp->addr));
3565
3566 hci_dev_lock(hdev);
3567
3568 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3569 if (err < 0)
3570 status = MGMT_STATUS_FAILED;
3571 else
3572 status = MGMT_STATUS_SUCCESS;
3573
3574 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3575 &cp->addr, sizeof(cp->addr));
3576
3577 hci_dev_unlock(hdev);
3578
3579 return err;
3580 }
3581
3582 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3583 u16 len)
3584 {
3585 struct mgmt_cp_unblock_device *cp = data;
3586 u8 status;
3587 int err;
3588
3589 BT_DBG("%s", hdev->name);
3590
3591 if (!bdaddr_type_is_valid(cp->addr.type))
3592 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3593 MGMT_STATUS_INVALID_PARAMS,
3594 &cp->addr, sizeof(cp->addr));
3595
3596 hci_dev_lock(hdev);
3597
3598 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3599 if (err < 0)
3600 status = MGMT_STATUS_INVALID_PARAMS;
3601 else
3602 status = MGMT_STATUS_SUCCESS;
3603
3604 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3605 &cp->addr, sizeof(cp->addr));
3606
3607 hci_dev_unlock(hdev);
3608
3609 return err;
3610 }
3611
3612 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3613 u16 len)
3614 {
3615 struct mgmt_cp_set_device_id *cp = data;
3616 struct hci_request req;
3617 int err;
3618 __u16 source;
3619
3620 BT_DBG("%s", hdev->name);
3621
3622 source = __le16_to_cpu(cp->source);
3623
3624 if (source > 0x0002)
3625 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3626 MGMT_STATUS_INVALID_PARAMS);
3627
3628 hci_dev_lock(hdev);
3629
3630 hdev->devid_source = source;
3631 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3632 hdev->devid_product = __le16_to_cpu(cp->product);
3633 hdev->devid_version = __le16_to_cpu(cp->version);
3634
3635 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3636
3637 hci_req_init(&req, hdev);
3638 update_eir(&req);
3639 hci_req_run(&req, NULL);
3640
3641 hci_dev_unlock(hdev);
3642
3643 return err;
3644 }
3645
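/* On failure, report the error status to every pending Set Advertising
 * command. On success, answer them with the current settings and send a
 * New Settings event.
 */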
3646 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3647 {
3648 struct cmd_lookup match = { NULL, hdev };
3649
3650 if (status) {
3651 u8 mgmt_err = mgmt_status(status);
3652
3653 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3654 cmd_status_rsp, &mgmt_err);
3655 return;
3656 }
3657
3658 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3659 &match);
3660
3661 new_settings(hdev, match.sk);
3662
3663 if (match.sk)
3664 sock_put(match.sk);
3665 }
3666
3667 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3668 u16 len)
3669 {
3670 struct mgmt_mode *cp = data;
3671 struct pending_cmd *cmd;
3672 struct hci_request req;
3673 u8 val, enabled, status;
3674 int err;
3675
3676 BT_DBG("request for %s", hdev->name);
3677
3678 status = mgmt_le_support(hdev);
3679 if (status)
3680 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3681 status);
3682
3683 if (cp->val != 0x00 && cp->val != 0x01)
3684 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3685 MGMT_STATUS_INVALID_PARAMS);
3686
3687 hci_dev_lock(hdev);
3688
3689 val = !!cp->val;
3690 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3691
3692 /* The following conditions are ones which mean that we should
3693 * not do any HCI communication but directly send a mgmt
3694 * response to user space (after toggling the flag if
3695 * necessary).
3696 */
3697 if (!hdev_is_powered(hdev) || val == enabled ||
3698 hci_conn_num(hdev, LE_LINK) > 0) {
3699 bool changed = false;
3700
3701 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3702 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3703 changed = true;
3704 }
3705
3706 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3707 if (err < 0)
3708 goto unlock;
3709
3710 if (changed)
3711 err = new_settings(hdev, sk);
3712
3713 goto unlock;
3714 }
3715
3716 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3717 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3718 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3719 MGMT_STATUS_BUSY);
3720 goto unlock;
3721 }
3722
3723 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3724 if (!cmd) {
3725 err = -ENOMEM;
3726 goto unlock;
3727 }
3728
3729 hci_req_init(&req, hdev);
3730
3731 if (val)
3732 enable_advertising(&req);
3733 else
3734 disable_advertising(&req);
3735
3736 err = hci_req_run(&req, set_advertising_complete);
3737 if (err < 0)
3738 mgmt_pending_remove(cmd);
3739
3740 unlock:
3741 hci_dev_unlock(hdev);
3742 return err;
3743 }
3744
3745 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3746 void *data, u16 len)
3747 {
3748 struct mgmt_cp_set_static_address *cp = data;
3749 int err;
3750
3751 BT_DBG("%s", hdev->name);
3752
3753 if (!lmp_le_capable(hdev))
3754 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3755 MGMT_STATUS_NOT_SUPPORTED);
3756
3757 if (hdev_is_powered(hdev))
3758 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3759 MGMT_STATUS_REJECTED);
3760
3761 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3762 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3763 return cmd_status(sk, hdev->id,
3764 MGMT_OP_SET_STATIC_ADDRESS,
3765 MGMT_STATUS_INVALID_PARAMS);
3766
3767 /* Two most significant bits shall be set */
3768 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3769 return cmd_status(sk, hdev->id,
3770 MGMT_OP_SET_STATIC_ADDRESS,
3771 MGMT_STATUS_INVALID_PARAMS);
3772 }
3773
3774 hci_dev_lock(hdev);
3775
3776 bacpy(&hdev->static_addr, &cp->bdaddr);
3777
3778 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3779
3780 hci_dev_unlock(hdev);
3781
3782 return err;
3783 }
3784
3785 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3786 void *data, u16 len)
3787 {
3788 struct mgmt_cp_set_scan_params *cp = data;
3789 __u16 interval, window;
3790 int err;
3791
3792 BT_DBG("%s", hdev->name);
3793
3794 if (!lmp_le_capable(hdev))
3795 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3796 MGMT_STATUS_NOT_SUPPORTED);
3797
3798 interval = __le16_to_cpu(cp->interval);
3799
3800 if (interval < 0x0004 || interval > 0x4000)
3801 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3802 MGMT_STATUS_INVALID_PARAMS);
3803
3804 window = __le16_to_cpu(cp->window);
3805
3806 if (window < 0x0004 || window > 0x4000)
3807 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3808 MGMT_STATUS_INVALID_PARAMS);
3809
3810 if (window > interval)
3811 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3812 MGMT_STATUS_INVALID_PARAMS);
3813
3814 hci_dev_lock(hdev);
3815
3816 hdev->le_scan_interval = interval;
3817 hdev->le_scan_window = window;
3818
3819 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3820
3821 hci_dev_unlock(hdev);
3822
3823 return err;
3824 }
3825
3826 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3827 {
3828 struct pending_cmd *cmd;
3829
3830 BT_DBG("status 0x%02x", status);
3831
3832 hci_dev_lock(hdev);
3833
3834 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3835 if (!cmd)
3836 goto unlock;
3837
3838 if (status) {
3839 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3840 mgmt_status(status));
3841 } else {
3842 struct mgmt_mode *cp = cmd->param;
3843
3844 if (cp->val)
3845 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3846 else
3847 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3848
3849 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3850 new_settings(hdev, cmd->sk);
3851 }
3852
3853 mgmt_pending_remove(cmd);
3854
3855 unlock:
3856 hci_dev_unlock(hdev);
3857 }
3858
3859 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3860 void *data, u16 len)
3861 {
3862 struct mgmt_mode *cp = data;
3863 struct pending_cmd *cmd;
3864 struct hci_request req;
3865 int err;
3866
3867 BT_DBG("%s", hdev->name);
3868
3869 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
3870 hdev->hci_ver < BLUETOOTH_VER_1_2)
3871 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3872 MGMT_STATUS_NOT_SUPPORTED);
3873
3874 if (cp->val != 0x00 && cp->val != 0x01)
3875 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3876 MGMT_STATUS_INVALID_PARAMS);
3877
3878 if (!hdev_is_powered(hdev))
3879 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3880 MGMT_STATUS_NOT_POWERED);
3881
3882 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3883 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3884 MGMT_STATUS_REJECTED);
3885
3886 hci_dev_lock(hdev);
3887
3888 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3889 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3890 MGMT_STATUS_BUSY);
3891 goto unlock;
3892 }
3893
3894 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3895 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3896 hdev);
3897 goto unlock;
3898 }
3899
3900 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3901 data, len);
3902 if (!cmd) {
3903 err = -ENOMEM;
3904 goto unlock;
3905 }
3906
3907 hci_req_init(&req, hdev);
3908
3909 write_fast_connectable(&req, cp->val);
3910
3911 err = hci_req_run(&req, fast_connectable_complete);
3912 if (err < 0) {
3913 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3914 MGMT_STATUS_FAILED);
3915 mgmt_pending_remove(cmd);
3916 }
3917
3918 unlock:
3919 hci_dev_unlock(hdev);
3920
3921 return err;
3922 }
3923
3924 static void set_bredr_scan(struct hci_request *req)
3925 {
3926 struct hci_dev *hdev = req->hdev;
3927 u8 scan = 0;
3928
3929 /* Ensure that fast connectable is disabled. This function will
3930 * not do anything if the page scan parameters are already what
3931 * they should be.
3932 */
3933 write_fast_connectable(req, false);
3934
3935 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3936 scan |= SCAN_PAGE;
3937 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3938 scan |= SCAN_INQUIRY;
3939
3940 if (scan)
3941 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3942 }
3943
3944 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3945 {
3946 struct pending_cmd *cmd;
3947
3948 BT_DBG("status 0x%02x", status);
3949
3950 hci_dev_lock(hdev);
3951
3952 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3953 if (!cmd)
3954 goto unlock;
3955
3956 if (status) {
3957 u8 mgmt_err = mgmt_status(status);
3958
3959 /* We need to restore the flag if related HCI commands
3960 * failed.
3961 */
3962 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3963
3964 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
3965 } else {
3966 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
3967 new_settings(hdev, cmd->sk);
3968 }
3969
3970 mgmt_pending_remove(cmd);
3971
3972 unlock:
3973 hci_dev_unlock(hdev);
3974 }
3975
3976 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3977 {
3978 struct mgmt_mode *cp = data;
3979 struct pending_cmd *cmd;
3980 struct hci_request req;
3981 int err;
3982
3983 BT_DBG("request for %s", hdev->name);
3984
3985 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
3986 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3987 MGMT_STATUS_NOT_SUPPORTED);
3988
3989 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3990 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3991 MGMT_STATUS_REJECTED);
3992
3993 if (cp->val != 0x00 && cp->val != 0x01)
3994 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
3995 MGMT_STATUS_INVALID_PARAMS);
3996
3997 hci_dev_lock(hdev);
3998
3999 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4000 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4001 goto unlock;
4002 }
4003
4004 if (!hdev_is_powered(hdev)) {
4005 if (!cp->val) {
4006 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4007 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4008 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4009 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4010 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4011 }
4012
4013 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4014
4015 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4016 if (err < 0)
4017 goto unlock;
4018
4019 err = new_settings(hdev, sk);
4020 goto unlock;
4021 }
4022
4023 /* Reject disabling when powered on */
4024 if (!cp->val) {
4025 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4026 MGMT_STATUS_REJECTED);
4027 goto unlock;
4028 }
4029
4030 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4031 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4032 MGMT_STATUS_BUSY);
4033 goto unlock;
4034 }
4035
4036 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4037 if (!cmd) {
4038 err = -ENOMEM;
4039 goto unlock;
4040 }
4041
4042 /* We need to flip the bit already here so that update_adv_data
4043 * generates the correct flags.
4044 */
4045 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4046
4047 hci_req_init(&req, hdev);
4048
4049 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4050 set_bredr_scan(&req);
4051
4052 /* Since only the advertising data flags will change, there
4053 * is no need to update the scan response data.
4054 */
4055 update_adv_data(&req);
4056
4057 err = hci_req_run(&req, set_bredr_complete);
4058 if (err < 0)
4059 mgmt_pending_remove(cmd);
4060
4061 unlock:
4062 hci_dev_unlock(hdev);
4063 return err;
4064 }
4065
4066 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4067 void *data, u16 len)
4068 {
4069 struct mgmt_mode *cp = data;
4070 struct pending_cmd *cmd;
4071 u8 val, status;
4072 int err;
4073
4074 BT_DBG("request for %s", hdev->name);
4075
4076 status = mgmt_bredr_support(hdev);
4077 if (status)
4078 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4079 status);
4080
4081 if (!lmp_sc_capable(hdev) &&
4082 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4083 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4084 MGMT_STATUS_NOT_SUPPORTED);
4085
4086 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4087 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4088 MGMT_STATUS_INVALID_PARAMS);
4089
4090 hci_dev_lock(hdev);
4091
4092 if (!hdev_is_powered(hdev)) {
4093 bool changed;
4094
4095 if (cp->val) {
4096 changed = !test_and_set_bit(HCI_SC_ENABLED,
4097 &hdev->dev_flags);
4098 if (cp->val == 0x02)
4099 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4100 else
4101 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4102 } else {
4103 changed = test_and_clear_bit(HCI_SC_ENABLED,
4104 &hdev->dev_flags);
4105 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4106 }
4107
4108 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4109 if (err < 0)
4110 goto failed;
4111
4112 if (changed)
4113 err = new_settings(hdev, sk);
4114
4115 goto failed;
4116 }
4117
4118 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4119 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4120 MGMT_STATUS_BUSY);
4121 goto failed;
4122 }
4123
4124 val = !!cp->val;
4125
4126 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4127 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4128 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4129 goto failed;
4130 }
4131
4132 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4133 if (!cmd) {
4134 err = -ENOMEM;
4135 goto failed;
4136 }
4137
4138 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4139 if (err < 0) {
4140 mgmt_pending_remove(cmd);
4141 goto failed;
4142 }
4143
4144 if (cp->val == 0x02)
4145 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4146 else
4147 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4148
4149 failed:
4150 hci_dev_unlock(hdev);
4151 return err;
4152 }
4153
4154 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4155 void *data, u16 len)
4156 {
4157 struct mgmt_mode *cp = data;
4158 bool changed;
4159 int err;
4160
4161 BT_DBG("request for %s", hdev->name);
4162
4163 if (cp->val != 0x00 && cp->val != 0x01)
4164 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4165 MGMT_STATUS_INVALID_PARAMS);
4166
4167 hci_dev_lock(hdev);
4168
4169 if (cp->val)
4170 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4171 else
4172 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4173
4174 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4175 if (err < 0)
4176 goto unlock;
4177
4178 if (changed)
4179 err = new_settings(hdev, sk);
4180
4181 unlock:
4182 hci_dev_unlock(hdev);
4183 return err;
4184 }
4185
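/* Only public LE addresses and static random addresses (two most
 * significant bits set) are accepted for IRK entries.
 */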
4186 static bool irk_is_valid(struct mgmt_irk_info *irk)
4187 {
4188 switch (irk->addr.type) {
4189 case BDADDR_LE_PUBLIC:
4190 return true;
4191
4192 case BDADDR_LE_RANDOM:
4193 /* Two most significant bits shall be set */
4194 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4195 return false;
4196 return true;
4197 }
4198
4199 return false;
4200 }
4201
4202 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4203 u16 len)
4204 {
4205 struct mgmt_cp_load_irks *cp = cp_data;
4206 u16 irk_count, expected_len;
4207 int i, err;
4208
4209 BT_DBG("request for %s", hdev->name);
4210
4211 if (!lmp_le_capable(hdev))
4212 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4213 MGMT_STATUS_NOT_SUPPORTED);
4214
4215 irk_count = __le16_to_cpu(cp->irk_count);
4216
4217 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4218 if (expected_len != len) {
4219 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4220 expected_len, len);
4221 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4222 MGMT_STATUS_INVALID_PARAMS);
4223 }
4224
4225 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4226
4227 for (i = 0; i < irk_count; i++) {
4228 struct mgmt_irk_info *key = &cp->irks[i];
4229
4230 if (!irk_is_valid(key))
4231 return cmd_status(sk, hdev->id,
4232 MGMT_OP_LOAD_IRKS,
4233 MGMT_STATUS_INVALID_PARAMS);
4234 }
4235
4236 hci_dev_lock(hdev);
4237
4238 hci_smp_irks_clear(hdev);
4239
4240 for (i = 0; i < irk_count; i++) {
4241 struct mgmt_irk_info *irk = &cp->irks[i];
4242 u8 addr_type;
4243
4244 if (irk->addr.type == BDADDR_LE_PUBLIC)
4245 addr_type = ADDR_LE_DEV_PUBLIC;
4246 else
4247 addr_type = ADDR_LE_DEV_RANDOM;
4248
4249 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4250 BDADDR_ANY);
4251 }
4252
4253 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4254
4255 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4256
4257 hci_dev_unlock(hdev);
4258
4259 return err;
4260 }
4261
4262 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4263 {
4264 if (key->master != 0x00 && key->master != 0x01)
4265 return false;
4266
4267 switch (key->addr.type) {
4268 case BDADDR_LE_PUBLIC:
4269 return true;
4270
4271 case BDADDR_LE_RANDOM:
4272 /* Two most significant bits shall be set */
4273 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4274 return false;
4275 return true;
4276 }
4277
4278 return false;
4279 }
4280
4281 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4282 void *cp_data, u16 len)
4283 {
4284 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4285 u16 key_count, expected_len;
4286 int i, err;
4287
4288 BT_DBG("request for %s", hdev->name);
4289
4290 if (!lmp_le_capable(hdev))
4291 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4292 MGMT_STATUS_NOT_SUPPORTED);
4293
4294 key_count = __le16_to_cpu(cp->key_count);
4295
4296 expected_len = sizeof(*cp) + key_count *
4297 sizeof(struct mgmt_ltk_info);
4298 if (expected_len != len) {
4299 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4300 expected_len, len);
4301 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4302 MGMT_STATUS_INVALID_PARAMS);
4303 }
4304
4305 BT_DBG("%s key_count %u", hdev->name, key_count);
4306
4307 for (i = 0; i < key_count; i++) {
4308 struct mgmt_ltk_info *key = &cp->keys[i];
4309
4310 if (!ltk_is_valid(key))
4311 return cmd_status(sk, hdev->id,
4312 MGMT_OP_LOAD_LONG_TERM_KEYS,
4313 MGMT_STATUS_INVALID_PARAMS);
4314 }
4315
4316 hci_dev_lock(hdev);
4317
4318 hci_smp_ltks_clear(hdev);
4319
4320 for (i = 0; i < key_count; i++) {
4321 struct mgmt_ltk_info *key = &cp->keys[i];
4322 u8 type, addr_type;
4323
4324 if (key->addr.type == BDADDR_LE_PUBLIC)
4325 addr_type = ADDR_LE_DEV_PUBLIC;
4326 else
4327 addr_type = ADDR_LE_DEV_RANDOM;
4328
4329 if (key->master)
4330 type = HCI_SMP_LTK;
4331 else
4332 type = HCI_SMP_LTK_SLAVE;
4333
4334 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4335 key->type, key->val, key->enc_size, key->ediv,
4336 key->rand);
4337 }
4338
4339 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4340 NULL, 0);
4341
4342 hci_dev_unlock(hdev);
4343
4344 return err;
4345 }
4346
4347 static const struct mgmt_handler {
4348 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4349 u16 data_len);
4350 bool var_len;
4351 size_t data_len;
4352 } mgmt_handlers[] = {
4353 { NULL }, /* 0x0000 (no command) */
4354 { read_version, false, MGMT_READ_VERSION_SIZE },
4355 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4356 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4357 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4358 { set_powered, false, MGMT_SETTING_SIZE },
4359 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4360 { set_connectable, false, MGMT_SETTING_SIZE },
4361 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4362 { set_pairable, false, MGMT_SETTING_SIZE },
4363 { set_link_security, false, MGMT_SETTING_SIZE },
4364 { set_ssp, false, MGMT_SETTING_SIZE },
4365 { set_hs, false, MGMT_SETTING_SIZE },
4366 { set_le, false, MGMT_SETTING_SIZE },
4367 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4368 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4369 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4370 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4371 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4372 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4373 { disconnect, false, MGMT_DISCONNECT_SIZE },
4374 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4375 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4376 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4377 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4378 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4379 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4380 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4381 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4382 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4383 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4384 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4385 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4386 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4387 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4388 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4389 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4390 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4391 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4392 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4393 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4394 { set_advertising, false, MGMT_SETTING_SIZE },
4395 { set_bredr, false, MGMT_SETTING_SIZE },
4396 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4397 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4398 { set_secure_conn, false, MGMT_SETTING_SIZE },
4399 { set_debug_keys, false, MGMT_SETTING_SIZE },
4400 { }, /* reserved opcode slot; keeps mgmt_handlers[] indexed by opcode */
4401 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
4402 };
4403
4404
4405 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4406 {
4407 void *buf;
4408 u8 *cp;
4409 struct mgmt_hdr *hdr;
4410 u16 opcode, index, len;
4411 struct hci_dev *hdev = NULL;
4412 const struct mgmt_handler *handler;
4413 int err;
4414
4415 BT_DBG("got %zu bytes", msglen);
4416
4417 if (msglen < sizeof(*hdr))
4418 return -EINVAL;
4419
4420 buf = kmalloc(msglen, GFP_KERNEL);
4421 if (!buf)
4422 return -ENOMEM;
4423
4424 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4425 err = -EFAULT;
4426 goto done;
4427 }
4428
4429 hdr = buf;
4430 opcode = __le16_to_cpu(hdr->opcode);
4431 index = __le16_to_cpu(hdr->index);
4432 len = __le16_to_cpu(hdr->len);
4433
4434 if (len != msglen - sizeof(*hdr)) {
4435 err = -EINVAL;
4436 goto done;
4437 }
4438
4439 if (index != MGMT_INDEX_NONE) {
4440 hdev = hci_dev_get(index);
4441 if (!hdev) {
4442 err = cmd_status(sk, index, opcode,
4443 MGMT_STATUS_INVALID_INDEX);
4444 goto done;
4445 }
4446
4447 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4448 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4449 err = cmd_status(sk, index, opcode,
4450 MGMT_STATUS_INVALID_INDEX);
4451 goto done;
4452 }
4453 }
4454
4455 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4456 mgmt_handlers[opcode].func == NULL) {
4457 BT_DBG("Unknown op %u", opcode);
4458 err = cmd_status(sk, index, opcode,
4459 MGMT_STATUS_UNKNOWN_COMMAND);
4460 goto done;
4461 }
4462
4463 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4464 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4465 err = cmd_status(sk, index, opcode,
4466 MGMT_STATUS_INVALID_INDEX);
4467 goto done;
4468 }
4469
4470 handler = &mgmt_handlers[opcode];
4471
4472 if ((handler->var_len && len < handler->data_len) ||
4473 (!handler->var_len && len != handler->data_len)) {
4474 err = cmd_status(sk, index, opcode,
4475 MGMT_STATUS_INVALID_PARAMS);
4476 goto done;
4477 }
4478
4479 if (hdev)
4480 mgmt_init_hdev(sk, hdev);
4481
4482 cp = buf + sizeof(*hdr);
4483
4484 err = handler->func(sk, hdev, cp, len);
4485 if (err < 0)
4486 goto done;
4487
4488 err = msglen;
4489
4490 done:
4491 if (hdev)
4492 hci_dev_put(hdev);
4493
4494 kfree(buf);
4495 return err;
4496 }
4497
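/*
 * Illustrative sketch (editor addition, not part of mgmt.c): every
 * frame parsed by mgmt_control() starts with a struct mgmt_hdr of
 * three little-endian 16-bit fields (opcode, index, len), followed by
 * exactly len parameter bytes. Fixed-size handlers require len to
 * match handler->data_len, while var_len handlers only require at
 * least that many bytes. The hypothetical helper below shows how such
 * a header would be filled in before the parameters are appended.
 */
static void mgmt_hdr_init(struct mgmt_hdr *hdr, u16 opcode, u16 index,
			  u16 param_len)
{
	hdr->opcode = cpu_to_le16(opcode);  /* e.g. MGMT_OP_LOAD_IRKS */
	hdr->index = cpu_to_le16(index);    /* controller id or MGMT_INDEX_NONE */
	hdr->len = cpu_to_le16(param_len);  /* must equal msglen - sizeof(*hdr) */
}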
4498 void mgmt_index_added(struct hci_dev *hdev)
4499 {
4500 if (hdev->dev_type != HCI_BREDR)
4501 return;
4502
4503 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4504 }
4505
4506 void mgmt_index_removed(struct hci_dev *hdev)
4507 {
4508 u8 status = MGMT_STATUS_INVALID_INDEX;
4509
4510 if (hdev->dev_type != HCI_BREDR)
4511 return;
4512
4513 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4514
4515 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4516 }
4517
4518 static void powered_complete(struct hci_dev *hdev, u8 status)
4519 {
4520 struct cmd_lookup match = { NULL, hdev };
4521
4522 BT_DBG("status 0x%02x", status);
4523
4524 hci_dev_lock(hdev);
4525
4526 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4527
4528 new_settings(hdev, match.sk);
4529
4530 hci_dev_unlock(hdev);
4531
4532 if (match.sk)
4533 sock_put(match.sk);
4534 }
4535
4536 static int powered_update_hci(struct hci_dev *hdev)
4537 {
4538 struct hci_request req;
4539 u8 link_sec;
4540
4541 hci_req_init(&req, hdev);
4542
4543 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4544 !lmp_host_ssp_capable(hdev)) {
4545 u8 ssp = 1;
4546
4547 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4548 }
4549
4550 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4551 lmp_bredr_capable(hdev)) {
4552 struct hci_cp_write_le_host_supported cp;
4553
4554 cp.le = 1;
4555 cp.simul = lmp_le_br_capable(hdev);
4556
4557 /* Check first if we already have the right
4558 * host state (host features set)
4559 */
4560 if (cp.le != lmp_host_le_capable(hdev) ||
4561 cp.simul != lmp_host_le_br_capable(hdev))
4562 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4563 sizeof(cp), &cp);
4564 }
4565
4566 if (lmp_le_capable(hdev)) {
4567 /* Set random address to static address if configured */
4568 if (bacmp(&hdev->static_addr, BDADDR_ANY))
4569 hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
4570 &hdev->static_addr);
4571
4572 /* Make sure the controller has a good default for
4573 * advertising data. This also applies to the case
4574 * where BR/EDR was toggled during the AUTO_OFF phase.
4575 */
4576 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4577 update_adv_data(&req);
4578 update_scan_rsp_data(&req);
4579 }
4580
4581 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4582 enable_advertising(&req);
4583 }
4584
4585 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4586 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4587 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4588 sizeof(link_sec), &link_sec);
4589
4590 if (lmp_bredr_capable(hdev)) {
4591 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4592 set_bredr_scan(&req);
4593 update_class(&req);
4594 update_name(&req);
4595 update_eir(&req);
4596 }
4597
4598 return hci_req_run(&req, powered_complete);
4599 }
4600
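/*
 * Illustrative sketch (editor addition, not part of mgmt.c):
 * powered_update_hci() relies on the hci_request pattern, where
 * several HCI commands are queued with hci_req_add() and then sent as
 * one batch by hci_req_run(), which invokes a single completion
 * callback. A hypothetical minimal request using only calls already
 * seen in this file would look like this:
 */
static int queue_write_auth_enable(struct hci_dev *hdev, u8 enable,
				   void (*complete)(struct hci_dev *hdev,
						    u8 status))
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), &enable);

	return hci_req_run(&req, complete);
}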
4601 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4602 {
4603 struct cmd_lookup match = { NULL, hdev };
4604 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4605 u8 zero_cod[] = { 0, 0, 0 };
4606 int err;
4607
4608 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4609 return 0;
4610
4611 if (powered) {
4612 if (powered_update_hci(hdev) == 0)
4613 return 0;
4614
4615 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
4616 &match);
4617 goto new_settings;
4618 }
4619
4620 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4621 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4622
4623 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4624 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4625 zero_cod, sizeof(zero_cod), NULL);
4626
4627 new_settings:
4628 err = new_settings(hdev, match.sk);
4629
4630 if (match.sk)
4631 sock_put(match.sk);
4632
4633 return err;
4634 }
4635
4636 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4637 {
4638 struct pending_cmd *cmd;
4639 u8 status;
4640
4641 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4642 if (!cmd)
4643 return;
4644
4645 if (err == -ERFKILL)
4646 status = MGMT_STATUS_RFKILLED;
4647 else
4648 status = MGMT_STATUS_FAILED;
4649
4650 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4651
4652 mgmt_pending_remove(cmd);
4653 }
4654
4655 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4656 {
4657 struct hci_request req;
4658
4659 hci_dev_lock(hdev);
4660
4661 /* When the discoverable timeout triggers, just make sure
4662 * the limited discoverable flag is cleared. Even in the case
4663 * of a timeout triggered from general discoverable, it is
4664 * safe to unconditionally clear the flag.
4665 */
4666 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4667 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4668
4669 hci_req_init(&req, hdev);
4670 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4671 u8 scan = SCAN_PAGE;
4672 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4673 sizeof(scan), &scan);
4674 }
4675 update_class(&req);
4676 update_adv_data(&req);
4677 hci_req_run(&req, NULL);
4678
4679 hdev->discov_timeout = 0;
4680
4681 new_settings(hdev, NULL);
4682
4683 hci_dev_unlock(hdev);
4684 }
4685
4686 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4687 {
4688 bool changed;
4689
4690 /* Nothing needed here if there's a pending command since that
4691 * command's request completion callback takes care of everything
4692 * necessary.
4693 */
4694 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4695 return;
4696
4697 if (discoverable) {
4698 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4699 } else {
4700 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4701 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4702 }
4703
4704 if (changed) {
4705 struct hci_request req;
4706
4707 /* If this change in discoverability was triggered by
4708 * disabling connectable mode, the advertising flags may
4709 * need to be updated.
4710 */
4711 hci_req_init(&req, hdev);
4712 update_adv_data(&req);
4713 hci_req_run(&req, NULL);
4714
4715 new_settings(hdev, NULL);
4716 }
4717 }
4718
4719 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4720 {
4721 bool changed;
4722
4723 /* Nothing needed here if there's a pending command since that
4724 * command's request completion callback takes care of everything
4725 * necessary.
4726 */
4727 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4728 return;
4729
4730 if (connectable)
4731 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4732 else
4733 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4734
4735 if (changed)
4736 new_settings(hdev, NULL);
4737 }
4738
4739 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4740 {
4741 u8 mgmt_err = mgmt_status(status);
4742
4743 if (scan & SCAN_PAGE)
4744 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4745 cmd_status_rsp, &mgmt_err);
4746
4747 if (scan & SCAN_INQUIRY)
4748 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4749 cmd_status_rsp, &mgmt_err);
4750 }
4751
4752 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4753 bool persistent)
4754 {
4755 struct mgmt_ev_new_link_key ev;
4756
4757 memset(&ev, 0, sizeof(ev));
4758
4759 ev.store_hint = persistent;
4760 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4761 ev.key.addr.type = BDADDR_BREDR;
4762 ev.key.type = key->type;
4763 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4764 ev.key.pin_len = key->pin_len;
4765
4766 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4767 }
4768
4769 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
4770 {
4771 struct mgmt_ev_new_long_term_key ev;
4772
4773 memset(&ev, 0, sizeof(ev));
4774
4775 /* Devices using resolvable or non-resolvable random addresses
4776 * without providing an identity resolving key don't require
4777 * their long term keys to be stored. Their addresses will
4778 * change the next time around.
4779 *
4780 * Only when a remote device provides an identity address
4781 * should the long term key be stored. If the remote identity
4782 * is known, the long term keys are internally mapped to the
4783 * identity address. So allow static random and public
4784 * addresses here.
4785 */
4786 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
4787 (key->bdaddr.b[5] & 0xc0) != 0xc0)
4788 ev.store_hint = 0x00;
4789 else
4790 ev.store_hint = 0x01;
4791
4792 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4793 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
4794 ev.key.type = key->authenticated;
4795 ev.key.enc_size = key->enc_size;
4796 ev.key.ediv = key->ediv;
4797
4798 if (key->type == HCI_SMP_LTK)
4799 ev.key.master = 1;
4800
4801 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
4802 memcpy(ev.key.val, key->val, sizeof(key->val));
4803
4804 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
4805 }
4806
4807 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
4808 {
4809 struct mgmt_ev_new_irk ev;
4810
4811 memset(&ev, 0, sizeof(ev));
4812
4813 /* For identity resolving keys from devices that are already
4814 * using a public address or static random address, do not
4815 * ask for storing this key. The identity resolving key is
4816 * only mandatory for devices using resolvable random
4817 * addresses.
4818 *
4819 * Storing all identity resolving keys has the downside that
4820 * they will also be loaded on the next boot of the system.
4821 * More identity resolving keys mean more time is needed
4822 * during scanning to actually resolve these addresses.
4823 */
4824 if (bacmp(&irk->rpa, BDADDR_ANY))
4825 ev.store_hint = 0x01;
4826 else
4827 ev.store_hint = 0x00;
4828
4829 bacpy(&ev.rpa, &irk->rpa);
4830 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
4831 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
4832 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
4833
4834 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
4835 }
4836
4837 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4838 u8 data_len)
4839 {
4840 eir[eir_len++] = sizeof(type) + data_len;
4841 eir[eir_len++] = type;
4842 memcpy(&eir[eir_len], data, data_len);
4843 eir_len += data_len;
4844
4845 return eir_len;
4846 }
4847
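/*
 * Illustrative sketch (editor addition, not part of mgmt.c):
 * eir_append_data() writes one EIR/AD structure, i.e. a length octet
 * that covers the type octet plus the payload, then the type octet,
 * then the payload itself, and returns the new write offset. The
 * hypothetical demo below appends a complete local name of "abc",
 * which produces the five octets 0x04, EIR_NAME_COMPLETE, 'a', 'b',
 * 'c' and returns 5.
 */
static u16 eir_append_name_demo(u8 *eir)
{
	return eir_append_data(eir, 0, EIR_NAME_COMPLETE, (u8 *) "abc", 3);
}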
4848 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4849 u8 addr_type, u32 flags, u8 *name, u8 name_len,
4850 u8 *dev_class)
4851 {
4852 char buf[512];
4853 struct mgmt_ev_device_connected *ev = (void *) buf;
4854 u16 eir_len = 0;
4855
4856 bacpy(&ev->addr.bdaddr, bdaddr);
4857 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4858
4859 ev->flags = __cpu_to_le32(flags);
4860
4861 if (name_len > 0)
4862 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
4863 name, name_len);
4864
4865 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
4866 eir_len = eir_append_data(ev->eir, eir_len,
4867 EIR_CLASS_OF_DEV, dev_class, 3);
4868
4869 ev->eir_len = cpu_to_le16(eir_len);
4870
4871 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
4872 sizeof(*ev) + eir_len, NULL);
4873 }
4874
4875 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4876 {
4877 struct mgmt_cp_disconnect *cp = cmd->param;
4878 struct sock **sk = data;
4879 struct mgmt_rp_disconnect rp;
4880
4881 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4882 rp.addr.type = cp->addr.type;
4883
4884 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4885 sizeof(rp));
4886
4887 *sk = cmd->sk;
4888 sock_hold(*sk);
4889
4890 mgmt_pending_remove(cmd);
4891 }
4892
4893 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4894 {
4895 struct hci_dev *hdev = data;
4896 struct mgmt_cp_unpair_device *cp = cmd->param;
4897 struct mgmt_rp_unpair_device rp;
4898
4899 memset(&rp, 0, sizeof(rp));
4900 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4901 rp.addr.type = cp->addr.type;
4902
4903 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4904
4905 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4906
4907 mgmt_pending_remove(cmd);
4908 }
4909
4910 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
4911 u8 link_type, u8 addr_type, u8 reason)
4912 {
4913 struct mgmt_ev_device_disconnected ev;
4914 struct sock *sk = NULL;
4915
4916 if (link_type != ACL_LINK && link_type != LE_LINK)
4917 return;
4918
4919 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
4920
4921 bacpy(&ev.addr.bdaddr, bdaddr);
4922 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4923 ev.reason = reason;
4924
4925 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
4926
4927 if (sk)
4928 sock_put(sk);
4929
4930 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4931 hdev);
4932 }
4933
4934 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
4935 u8 link_type, u8 addr_type, u8 status)
4936 {
4937 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
4938 struct mgmt_cp_disconnect *cp;
4939 struct mgmt_rp_disconnect rp;
4940 struct pending_cmd *cmd;
4941
4942 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
4943 hdev);
4944
4945 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
4946 if (!cmd)
4947 return;
4948
4949 cp = cmd->param;
4950
4951 if (bacmp(bdaddr, &cp->addr.bdaddr))
4952 return;
4953
4954 if (cp->addr.type != bdaddr_type)
4955 return;
4956
4957 bacpy(&rp.addr.bdaddr, bdaddr);
4958 rp.addr.type = bdaddr_type;
4959
4960 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
4961 mgmt_status(status), &rp, sizeof(rp));
4962
4963 mgmt_pending_remove(cmd);
4964 }
4965
4966 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4967 u8 addr_type, u8 status)
4968 {
4969 struct mgmt_ev_connect_failed ev;
4970
4971 bacpy(&ev.addr.bdaddr, bdaddr);
4972 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4973 ev.status = mgmt_status(status);
4974
4975 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4976 }
4977
4978 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4979 {
4980 struct mgmt_ev_pin_code_request ev;
4981
4982 bacpy(&ev.addr.bdaddr, bdaddr);
4983 ev.addr.type = BDADDR_BREDR;
4984 ev.secure = secure;
4985
4986 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4987 }
4988
4989 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4990 u8 status)
4991 {
4992 struct pending_cmd *cmd;
4993 struct mgmt_rp_pin_code_reply rp;
4994
4995 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4996 if (!cmd)
4997 return;
4998
4999 bacpy(&rp.addr.bdaddr, bdaddr);
5000 rp.addr.type = BDADDR_BREDR;
5001
5002 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5003 mgmt_status(status), &rp, sizeof(rp));
5004
5005 mgmt_pending_remove(cmd);
5006 }
5007
5008 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5009 u8 status)
5010 {
5011 struct pending_cmd *cmd;
5012 struct mgmt_rp_pin_code_reply rp;
5013
5014 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5015 if (!cmd)
5016 return;
5017
5018 bacpy(&rp.addr.bdaddr, bdaddr);
5019 rp.addr.type = BDADDR_BREDR;
5020
5021 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5022 mgmt_status(status), &rp, sizeof(rp));
5023
5024 mgmt_pending_remove(cmd);
5025 }
5026
5027 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5028 u8 link_type, u8 addr_type, __le32 value,
5029 u8 confirm_hint)
5030 {
5031 struct mgmt_ev_user_confirm_request ev;
5032
5033 BT_DBG("%s", hdev->name);
5034
5035 bacpy(&ev.addr.bdaddr, bdaddr);
5036 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5037 ev.confirm_hint = confirm_hint;
5038 ev.value = value;
5039
5040 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5041 NULL);
5042 }
5043
5044 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5045 u8 link_type, u8 addr_type)
5046 {
5047 struct mgmt_ev_user_passkey_request ev;
5048
5049 BT_DBG("%s", hdev->name);
5050
5051 bacpy(&ev.addr.bdaddr, bdaddr);
5052 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5053
5054 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5055 NULL);
5056 }
5057
5058 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5059 u8 link_type, u8 addr_type, u8 status,
5060 u8 opcode)
5061 {
5062 struct pending_cmd *cmd;
5063 struct mgmt_rp_user_confirm_reply rp;
5064 int err;
5065
5066 cmd = mgmt_pending_find(opcode, hdev);
5067 if (!cmd)
5068 return -ENOENT;
5069
5070 bacpy(&rp.addr.bdaddr, bdaddr);
5071 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5072 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5073 &rp, sizeof(rp));
5074
5075 mgmt_pending_remove(cmd);
5076
5077 return err;
5078 }
5079
5080 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5081 u8 link_type, u8 addr_type, u8 status)
5082 {
5083 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5084 status, MGMT_OP_USER_CONFIRM_REPLY);
5085 }
5086
5087 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5088 u8 link_type, u8 addr_type, u8 status)
5089 {
5090 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5091 status,
5092 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5093 }
5094
5095 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5096 u8 link_type, u8 addr_type, u8 status)
5097 {
5098 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5099 status, MGMT_OP_USER_PASSKEY_REPLY);
5100 }
5101
5102 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5103 u8 link_type, u8 addr_type, u8 status)
5104 {
5105 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5106 status,
5107 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5108 }
5109
5110 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5111 u8 link_type, u8 addr_type, u32 passkey,
5112 u8 entered)
5113 {
5114 struct mgmt_ev_passkey_notify ev;
5115
5116 BT_DBG("%s", hdev->name);
5117
5118 bacpy(&ev.addr.bdaddr, bdaddr);
5119 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5120 ev.passkey = __cpu_to_le32(passkey);
5121 ev.entered = entered;
5122
5123 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5124 }
5125
5126 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5127 u8 addr_type, u8 status)
5128 {
5129 struct mgmt_ev_auth_failed ev;
5130
5131 bacpy(&ev.addr.bdaddr, bdaddr);
5132 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5133 ev.status = mgmt_status(status);
5134
5135 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5136 }
5137
5138 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5139 {
5140 struct cmd_lookup match = { NULL, hdev };
5141 bool changed;
5142
5143 if (status) {
5144 u8 mgmt_err = mgmt_status(status);
5145 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5146 cmd_status_rsp, &mgmt_err);
5147 return;
5148 }
5149
5150 if (test_bit(HCI_AUTH, &hdev->flags))
5151 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5152 &hdev->dev_flags);
5153 else
5154 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5155 &hdev->dev_flags);
5156
5157 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5158 &match);
5159
5160 if (changed)
5161 new_settings(hdev, match.sk);
5162
5163 if (match.sk)
5164 sock_put(match.sk);
5165 }
5166
5167 static void clear_eir(struct hci_request *req)
5168 {
5169 struct hci_dev *hdev = req->hdev;
5170 struct hci_cp_write_eir cp;
5171
5172 if (!lmp_ext_inq_capable(hdev))
5173 return;
5174
5175 memset(hdev->eir, 0, sizeof(hdev->eir));
5176
5177 memset(&cp, 0, sizeof(cp));
5178
5179 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5180 }
5181
5182 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5183 {
5184 struct cmd_lookup match = { NULL, hdev };
5185 struct hci_request req;
5186 bool changed = false;
5187
5188 if (status) {
5189 u8 mgmt_err = mgmt_status(status);
5190
5191 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5192 &hdev->dev_flags)) {
5193 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5194 new_settings(hdev, NULL);
5195 }
5196
5197 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5198 &mgmt_err);
5199 return;
5200 }
5201
5202 if (enable) {
5203 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5204 } else {
5205 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5206 if (!changed)
5207 changed = test_and_clear_bit(HCI_HS_ENABLED,
5208 &hdev->dev_flags);
5209 else
5210 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5211 }
5212
5213 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5214
5215 if (changed)
5216 new_settings(hdev, match.sk);
5217
5218 if (match.sk)
5219 sock_put(match.sk);
5220
5221 hci_req_init(&req, hdev);
5222
5223 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5224 update_eir(&req);
5225 else
5226 clear_eir(&req);
5227
5228 hci_req_run(&req, NULL);
5229 }
5230
5231 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5232 {
5233 struct cmd_lookup match = { NULL, hdev };
5234 bool changed = false;
5235
5236 if (status) {
5237 u8 mgmt_err = mgmt_status(status);
5238
5239 if (enable) {
5240 if (test_and_clear_bit(HCI_SC_ENABLED,
5241 &hdev->dev_flags))
5242 new_settings(hdev, NULL);
5243 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5244 }
5245
5246 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5247 cmd_status_rsp, &mgmt_err);
5248 return;
5249 }
5250
5251 if (enable) {
5252 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5253 } else {
5254 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5255 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5256 }
5257
5258 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5259 settings_rsp, &match);
5260
5261 if (changed)
5262 new_settings(hdev, match.sk);
5263
5264 if (match.sk)
5265 sock_put(match.sk);
5266 }
5267
5268 static void sk_lookup(struct pending_cmd *cmd, void *data)
5269 {
5270 struct cmd_lookup *match = data;
5271
5272 if (match->sk == NULL) {
5273 match->sk = cmd->sk;
5274 sock_hold(match->sk);
5275 }
5276 }
5277
5278 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5279 u8 status)
5280 {
5281 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5282
5283 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5284 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5285 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5286
5287 if (!status)
5288 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5289 NULL);
5290
5291 if (match.sk)
5292 sock_put(match.sk);
5293 }
5294
5295 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5296 {
5297 struct mgmt_cp_set_local_name ev;
5298 struct pending_cmd *cmd;
5299
5300 if (status)
5301 return;
5302
5303 memset(&ev, 0, sizeof(ev));
5304 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5305 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5306
5307 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5308 if (!cmd) {
5309 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5310
5311 /* If this is an HCI command related to powering on the
5312 * HCI dev, don't send any mgmt signals.
5313 */
5314 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5315 return;
5316 }
5317
5318 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5319 cmd ? cmd->sk : NULL);
5320 }
5321
5322 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5323 u8 *randomizer192, u8 *hash256,
5324 u8 *randomizer256, u8 status)
5325 {
5326 struct pending_cmd *cmd;
5327
5328 BT_DBG("%s status %u", hdev->name, status);
5329
5330 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5331 if (!cmd)
5332 return;
5333
5334 if (status) {
5335 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5336 mgmt_status(status));
5337 } else {
5338 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5339 hash256 && randomizer256) {
5340 struct mgmt_rp_read_local_oob_ext_data rp;
5341
5342 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5343 memcpy(rp.randomizer192, randomizer192,
5344 sizeof(rp.randomizer192));
5345
5346 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5347 memcpy(rp.randomizer256, randomizer256,
5348 sizeof(rp.randomizer256));
5349
5350 cmd_complete(cmd->sk, hdev->id,
5351 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5352 &rp, sizeof(rp));
5353 } else {
5354 struct mgmt_rp_read_local_oob_data rp;
5355
5356 memcpy(rp.hash, hash192, sizeof(rp.hash));
5357 memcpy(rp.randomizer, randomizer192,
5358 sizeof(rp.randomizer));
5359
5360 cmd_complete(cmd->sk, hdev->id,
5361 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5362 &rp, sizeof(rp));
5363 }
5364 }
5365
5366 mgmt_pending_remove(cmd);
5367 }
5368
5369 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5370 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
5371 u8 ssp, u8 *eir, u16 eir_len)
5372 {
5373 char buf[512];
5374 struct mgmt_ev_device_found *ev = (void *) buf;
5375 struct smp_irk *irk;
5376 size_t ev_size;
5377
5378 if (!hci_discovery_active(hdev))
5379 return;
5380
5381 /* Leave 5 bytes for a potential CoD field */
5382 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5383 return;
5384
5385 memset(buf, 0, sizeof(buf));
5386
5387 irk = hci_get_irk(hdev, bdaddr, addr_type);
5388 if (irk) {
5389 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5390 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5391 } else {
5392 bacpy(&ev->addr.bdaddr, bdaddr);
5393 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5394 }
5395
5396 ev->rssi = rssi;
5397 if (cfm_name)
5398 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5399 if (!ssp)
5400 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5401
5402 if (eir_len > 0)
5403 memcpy(ev->eir, eir, eir_len);
5404
5405 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5406 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5407 dev_class, 3);
5408
5409 ev->eir_len = cpu_to_le16(eir_len);
5410 ev_size = sizeof(*ev) + eir_len;
5411
5412 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
5413 }
5414
5415 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5416 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5417 {
5418 struct mgmt_ev_device_found *ev;
5419 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5420 u16 eir_len;
5421
5422 ev = (struct mgmt_ev_device_found *) buf;
5423
5424 memset(buf, 0, sizeof(buf));
5425
5426 bacpy(&ev->addr.bdaddr, bdaddr);
5427 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5428 ev->rssi = rssi;
5429
5430 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5431 name_len);
5432
5433 ev->eir_len = cpu_to_le16(eir_len);
5434
5435 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5436 }
5437
5438 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5439 {
5440 struct mgmt_ev_discovering ev;
5441 struct pending_cmd *cmd;
5442
5443 BT_DBG("%s discovering %u", hdev->name, discovering);
5444
5445 if (discovering)
5446 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5447 else
5448 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5449
5450 if (cmd != NULL) {
5451 u8 type = hdev->discovery.type;
5452
5453 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5454 sizeof(type));
5455 mgmt_pending_remove(cmd);
5456 }
5457
5458 memset(&ev, 0, sizeof(ev));
5459 ev.type = hdev->discovery.type;
5460 ev.discovering = discovering;
5461
5462 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5463 }
5464
5465 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5466 {
5467 struct pending_cmd *cmd;
5468 struct mgmt_ev_device_blocked ev;
5469
5470 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5471
5472 bacpy(&ev.addr.bdaddr, bdaddr);
5473 ev.addr.type = type;
5474
5475 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5476 cmd ? cmd->sk : NULL);
5477 }
5478
5479 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5480 {
5481 struct pending_cmd *cmd;
5482 struct mgmt_ev_device_unblocked ev;
5483
5484 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5485
5486 bacpy(&ev.addr.bdaddr, bdaddr);
5487 ev.addr.type = type;
5488
5489 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5490 cmd ? cmd->sk : NULL);
5491 }
5492
5493 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5494 {
5495 BT_DBG("%s status %u", hdev->name, status);
5496
5497 /* Clear the advertising mgmt setting if we failed to re-enable it */
5498 if (status) {
5499 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5500 new_settings(hdev, NULL);
5501 }
5502 }
5503
5504 void mgmt_reenable_advertising(struct hci_dev *hdev)
5505 {
5506 struct hci_request req;
5507
5508 if (hci_conn_num(hdev, LE_LINK) > 0)
5509 return;
5510
5511 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5512 return;
5513
5514 hci_req_init(&req, hdev);
5515 enable_advertising(&req);
5516
5517 /* If this fails we have no option but to let user space know
5518 * that we've disabled advertising.
5519 */
5520 if (hci_req_run(&req, adv_enable_complete) < 0) {
5521 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5522 new_settings(hdev, NULL);
5523 }
5524 }