git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Fix missing address type check for removing LTKs
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Mgmt command opcodes this kernel implements; reported verbatim to
 * user space by the Read Management Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_LOAD_IRKS,
};
86
/* Mgmt event opcodes this kernel can emit; reported to user space by
 * the Read Management Commands reply alongside mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
};
110
/* Lifetime of the cached service-class/EIR state (2 seconds). */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller counts as powered only when it is up and not merely in
 * the transient auto-power-on state used during controller setup.
 * The macro argument is parenthesized so any expression is safe.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &(hdev)->flags) && \
			       !test_bit(HCI_AUTO_OFF, &(hdev)->dev_flags))
115
/* A mgmt command awaiting completion, linked on hdev->mgmt_pending. */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* this command was issued with */
	int index;		/* controller id the command targets */
	void *param;		/* copy of the command parameters (owned) */
	struct sock *sk;	/* issuing socket; reference held */
	void *user_data;	/* per-command scratch for the handler */
};
124
125 /* HCI to MGMT error code conversion table */
126 static u8 mgmt_status_table[] = {
127 MGMT_STATUS_SUCCESS,
128 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
129 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
130 MGMT_STATUS_FAILED, /* Hardware Failure */
131 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
132 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
133 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
134 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
135 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
136 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
137 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
138 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
139 MGMT_STATUS_BUSY, /* Command Disallowed */
140 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
141 MGMT_STATUS_REJECTED, /* Rejected Security */
142 MGMT_STATUS_REJECTED, /* Rejected Personal */
143 MGMT_STATUS_TIMEOUT, /* Host Timeout */
144 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
145 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
146 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
147 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
148 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
149 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
150 MGMT_STATUS_BUSY, /* Repeated Attempts */
151 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
152 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
154 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
155 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
156 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
157 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
158 MGMT_STATUS_FAILED, /* Unspecified Error */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
160 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
161 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
162 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
163 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
164 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
165 MGMT_STATUS_FAILED, /* Unit Link Key Used */
166 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
167 MGMT_STATUS_TIMEOUT, /* Instant Passed */
168 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
169 MGMT_STATUS_FAILED, /* Transaction Collision */
170 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
171 MGMT_STATUS_REJECTED, /* QoS Rejected */
172 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
173 MGMT_STATUS_REJECTED, /* Insufficient Security */
174 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
175 MGMT_STATUS_BUSY, /* Role Switch Pending */
176 MGMT_STATUS_FAILED, /* Slot Violation */
177 MGMT_STATUS_FAILED, /* Role Switch Failed */
178 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
179 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
180 MGMT_STATUS_BUSY, /* Host Busy Pairing */
181 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
182 MGMT_STATUS_BUSY, /* Controller Busy */
183 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
184 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
185 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
186 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
187 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
188 };
189
190 static u8 mgmt_status(u8 hci_status)
191 {
192 if (hci_status < ARRAY_SIZE(mgmt_status_table))
193 return mgmt_status_table[hci_status];
194
195 return MGMT_STATUS_FAILED;
196 }
197
198 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
199 {
200 struct sk_buff *skb;
201 struct mgmt_hdr *hdr;
202 struct mgmt_ev_cmd_status *ev;
203 int err;
204
205 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
206
207 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
208 if (!skb)
209 return -ENOMEM;
210
211 hdr = (void *) skb_put(skb, sizeof(*hdr));
212
213 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
214 hdr->index = cpu_to_le16(index);
215 hdr->len = cpu_to_le16(sizeof(*ev));
216
217 ev = (void *) skb_put(skb, sizeof(*ev));
218 ev->status = status;
219 ev->opcode = cpu_to_le16(cmd);
220
221 err = sock_queue_rcv_skb(sk, skb);
222 if (err < 0)
223 kfree_skb(skb);
224
225 return err;
226 }
227
228 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
229 void *rp, size_t rp_len)
230 {
231 struct sk_buff *skb;
232 struct mgmt_hdr *hdr;
233 struct mgmt_ev_cmd_complete *ev;
234 int err;
235
236 BT_DBG("sock %p", sk);
237
238 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
239 if (!skb)
240 return -ENOMEM;
241
242 hdr = (void *) skb_put(skb, sizeof(*hdr));
243
244 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
245 hdr->index = cpu_to_le16(index);
246 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247
248 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
249 ev->opcode = cpu_to_le16(cmd);
250 ev->status = status;
251
252 if (rp)
253 memcpy(ev->data, rp, rp_len);
254
255 err = sock_queue_rcv_skb(sk, skb);
256 if (err < 0)
257 kfree_skb(skb);
258
259 return err;
260 }
261
262 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
263 u16 data_len)
264 {
265 struct mgmt_rp_read_version rp;
266
267 BT_DBG("sock %p", sk);
268
269 rp.version = MGMT_VERSION;
270 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271
272 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
273 sizeof(rp));
274 }
275
276 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
277 u16 data_len)
278 {
279 struct mgmt_rp_read_commands *rp;
280 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
281 const u16 num_events = ARRAY_SIZE(mgmt_events);
282 __le16 *opcode;
283 size_t rp_size;
284 int i, err;
285
286 BT_DBG("sock %p", sk);
287
288 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289
290 rp = kmalloc(rp_size, GFP_KERNEL);
291 if (!rp)
292 return -ENOMEM;
293
294 rp->num_commands = __constant_cpu_to_le16(num_commands);
295 rp->num_events = __constant_cpu_to_le16(num_events);
296
297 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
298 put_unaligned_le16(mgmt_commands[i], opcode);
299
300 for (i = 0; i < num_events; i++, opcode++)
301 put_unaligned_le16(mgmt_events[i], opcode);
302
303 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
304 rp_size);
305 kfree(rp);
306
307 return err;
308 }
309
310 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
311 u16 data_len)
312 {
313 struct mgmt_rp_read_index_list *rp;
314 struct hci_dev *d;
315 size_t rp_len;
316 u16 count;
317 int err;
318
319 BT_DBG("sock %p", sk);
320
321 read_lock(&hci_dev_list_lock);
322
323 count = 0;
324 list_for_each_entry(d, &hci_dev_list, list) {
325 if (d->dev_type == HCI_BREDR)
326 count++;
327 }
328
329 rp_len = sizeof(*rp) + (2 * count);
330 rp = kmalloc(rp_len, GFP_ATOMIC);
331 if (!rp) {
332 read_unlock(&hci_dev_list_lock);
333 return -ENOMEM;
334 }
335
336 count = 0;
337 list_for_each_entry(d, &hci_dev_list, list) {
338 if (test_bit(HCI_SETUP, &d->dev_flags))
339 continue;
340
341 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
342 continue;
343
344 if (d->dev_type == HCI_BREDR) {
345 rp->index[count++] = cpu_to_le16(d->id);
346 BT_DBG("Added hci%u", d->id);
347 }
348 }
349
350 rp->num_controllers = cpu_to_le16(count);
351 rp_len = sizeof(*rp) + (2 * count);
352
353 read_unlock(&hci_dev_list_lock);
354
355 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
356 rp_len);
357
358 kfree(rp);
359
360 return err;
361 }
362
363 static u32 get_supported_settings(struct hci_dev *hdev)
364 {
365 u32 settings = 0;
366
367 settings |= MGMT_SETTING_POWERED;
368 settings |= MGMT_SETTING_PAIRABLE;
369 settings |= MGMT_SETTING_DEBUG_KEYS;
370
371 if (lmp_bredr_capable(hdev)) {
372 settings |= MGMT_SETTING_CONNECTABLE;
373 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
374 settings |= MGMT_SETTING_FAST_CONNECTABLE;
375 settings |= MGMT_SETTING_DISCOVERABLE;
376 settings |= MGMT_SETTING_BREDR;
377 settings |= MGMT_SETTING_LINK_SECURITY;
378
379 if (lmp_ssp_capable(hdev)) {
380 settings |= MGMT_SETTING_SSP;
381 settings |= MGMT_SETTING_HS;
382 }
383
384 if (lmp_sc_capable(hdev) ||
385 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
386 settings |= MGMT_SETTING_SECURE_CONN;
387 }
388
389 if (lmp_le_capable(hdev)) {
390 settings |= MGMT_SETTING_LE;
391 settings |= MGMT_SETTING_ADVERTISING;
392 }
393
394 return settings;
395 }
396
397 static u32 get_current_settings(struct hci_dev *hdev)
398 {
399 u32 settings = 0;
400
401 if (hdev_is_powered(hdev))
402 settings |= MGMT_SETTING_POWERED;
403
404 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
405 settings |= MGMT_SETTING_CONNECTABLE;
406
407 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_FAST_CONNECTABLE;
409
410 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_DISCOVERABLE;
412
413 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_PAIRABLE;
415
416 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
417 settings |= MGMT_SETTING_BREDR;
418
419 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_LE;
421
422 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LINK_SECURITY;
424
425 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
426 settings |= MGMT_SETTING_SSP;
427
428 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_HS;
430
431 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
432 settings |= MGMT_SETTING_ADVERTISING;
433
434 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_SECURE_CONN;
436
437 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
438 settings |= MGMT_SETTING_DEBUG_KEYS;
439
440 return settings;
441 }
442
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, using at most @len bytes. Returns a pointer just past the
 * written data (== @data when nothing was written). If not all UUIDs
 * fit, the field type is downgraded from "complete" to "incomplete".
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 16-bit UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives at bytes 12-13 of the 128-bit
		 * form; values below 0x1100 are not service classes.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
486
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, using at most @len bytes. Same contract as
 * create_uuid16_list(): returns a pointer just past the written data,
 * downgrading to the "incomplete" field type when space runs out.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 32-bit UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias occupies bytes 12-15 of the 128-bit
		 * form.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
519
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, using at most @len bytes. Same contract as
 * create_uuid16_list(): returns a pointer just past the written data,
 * downgrading to the "incomplete" field type when space runs out.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Minimum: 2-byte field header plus one 128-bit UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
552
553 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
554 {
555 struct pending_cmd *cmd;
556
557 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
558 if (cmd->opcode == opcode)
559 return cmd;
560 }
561
562 return NULL;
563 }
564
565 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
566 {
567 u8 ad_len = 0;
568 size_t name_len;
569
570 name_len = strlen(hdev->dev_name);
571 if (name_len > 0) {
572 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
573
574 if (name_len > max_len) {
575 name_len = max_len;
576 ptr[1] = EIR_NAME_SHORT;
577 } else
578 ptr[1] = EIR_NAME_COMPLETE;
579
580 ptr[0] = name_len + 1;
581
582 memcpy(ptr + 2, hdev->dev_name, name_len);
583
584 ad_len += (name_len + 2);
585 ptr += (name_len + 2);
586 }
587
588 return ad_len;
589 }
590
/* Queue an HCI LE Set Scan Response Data command on @req if the scan
 * response payload actually changed since the last update.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command when nothing changed. */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons. */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
615
616 static u8 get_adv_discov_flags(struct hci_dev *hdev)
617 {
618 struct pending_cmd *cmd;
619
620 /* If there's a pending mgmt command the flags will not yet have
621 * their final values, so check for this first.
622 */
623 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
624 if (cmd) {
625 struct mgmt_mode *cp = cmd->param;
626 if (cp->val == 0x01)
627 return LE_AD_GENERAL;
628 else if (cp->val == 0x02)
629 return LE_AD_LIMITED;
630 } else {
631 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
632 return LE_AD_LIMITED;
633 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
634 return LE_AD_GENERAL;
635 }
636
637 return 0;
638 }
639
/* Fill @ptr with the LE advertising data payload (flags field plus TX
 * power when known). Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The flags field is only emitted when at least one flag is set. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
671
/* Queue an HCI LE Set Advertising Data command on @req if the
 * advertising payload actually changed since the last update.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command when nothing changed. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons. */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
696
/* Build the extended inquiry response payload into @data: local name,
 * TX power, device id and the registered service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* devid_source > 0 indicates a device id has been configured. */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper consumes only the space that remains. */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
744
745 static void update_eir(struct hci_request *req)
746 {
747 struct hci_dev *hdev = req->hdev;
748 struct hci_cp_write_eir cp;
749
750 if (!hdev_is_powered(hdev))
751 return;
752
753 if (!lmp_ext_inq_capable(hdev))
754 return;
755
756 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
757 return;
758
759 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
760 return;
761
762 memset(&cp, 0, sizeof(cp));
763
764 create_eir(hdev, cp.data);
765
766 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
767 return;
768
769 memcpy(hdev->eir, cp.data, sizeof(cp.data));
770
771 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
772 }
773
774 static u8 get_service_classes(struct hci_dev *hdev)
775 {
776 struct bt_uuid *uuid;
777 u8 val = 0;
778
779 list_for_each_entry(uuid, &hdev->uuids, list)
780 val |= uuid->svc_hint;
781
782 return val;
783 }
784
/* Queue an HCI Write Class of Device command on @req if the class of
 * device actually changed.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, class updates are deferred. */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Reflect limited discoverable mode in the class of device. */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
813
/* Delayed work: when the service cache timeout expires, push the
 * deferred EIR and class-of-device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only run once per cache period; bail if already cleared. */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the request outside the device lock. */
	hci_req_run(&req, NULL);
}
834
/* One-time mgmt initialization for a controller, triggered by the
 * first mgmt command that targets it.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* HCI_MGMT doubles as the "already initialized" marker. */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
849
/* Handle MGMT_OP_READ_INFO: report a controller's address, version,
 * class, name and supported/current settings.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Hold the device lock so the snapshot below is consistent. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
879
880 static void mgmt_pending_free(struct pending_cmd *cmd)
881 {
882 sock_put(cmd->sk);
883 kfree(cmd->param);
884 kfree(cmd);
885 }
886
/* Allocate a pending command for @hdev, copy @len bytes of command
 * parameters from @data and queue it on hdev->mgmt_pending. Takes a
 * reference on @sk which is dropped by mgmt_pending_free().
 * Returns NULL on allocation failure.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	/* NOTE(review): when @data is NULL the param buffer is left
	 * uninitialized; callers appear to always pass data — confirm.
	 */
	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
916
917 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
918 void (*cb)(struct pending_cmd *cmd,
919 void *data),
920 void *data)
921 {
922 struct pending_cmd *cmd, *tmp;
923
924 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
925 if (opcode > 0 && cmd->opcode != opcode)
926 continue;
927
928 cb(cmd, data);
929 }
930 }
931
/* Unlink a pending command from its controller's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
937
938 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
939 {
940 __le32 settings = cpu_to_le32(get_current_settings(hdev));
941
942 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
943 sizeof(settings));
944 }
945
/* Handle MGMT_OP_SET_POWERED: power the controller up or down. The
 * actual transition runs asynchronously; the reply is sent from the
 * power work's completion path via the pending command.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was auto-powered for setup, cancel the
	 * pending auto power-off; powering on then only needs the
	 * mgmt state to be synchronized.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just echo the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Defer the transition to the request workqueue. */
	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1000
1001 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1002 struct sock *skip_sk)
1003 {
1004 struct sk_buff *skb;
1005 struct mgmt_hdr *hdr;
1006
1007 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1008 if (!skb)
1009 return -ENOMEM;
1010
1011 hdr = (void *) skb_put(skb, sizeof(*hdr));
1012 hdr->opcode = cpu_to_le16(event);
1013 if (hdev)
1014 hdr->index = cpu_to_le16(hdev->id);
1015 else
1016 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1017 hdr->len = cpu_to_le16(data_len);
1018
1019 if (data)
1020 memcpy(skb_put(skb, data_len), data, data_len);
1021
1022 /* Time stamp */
1023 __net_timestamp(skb);
1024
1025 hci_send_to_control(skb, skip_sk);
1026 kfree_skb(skb);
1027
1028 return 0;
1029 }
1030
1031 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1032 {
1033 __le32 ev;
1034
1035 ev = cpu_to_le32(get_current_settings(hdev));
1036
1037 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1038 }
1039
/* Context threaded through mgmt_pending_foreach() callbacks such as
 * settings_rsp() and cmd_status_rsp().
 */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (ref held) */
	struct hci_dev *hdev;	/* controller the commands belong to */
	u8 mgmt_status;		/* status to report, where applicable */
};
1045
/* mgmt_pending_foreach() callback: answer a pending Set-* command with
 * the current settings, remove it, and remember the first socket seen
 * so the caller can skip it when broadcasting new_settings().
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	/* Keep a reference to the first socket for the caller. */
	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1061
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data and discard it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1069
1070 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1071 {
1072 if (!lmp_bredr_capable(hdev))
1073 return MGMT_STATUS_NOT_SUPPORTED;
1074 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1075 return MGMT_STATUS_REJECTED;
1076 else
1077 return MGMT_STATUS_SUCCESS;
1078 }
1079
1080 static u8 mgmt_le_support(struct hci_dev *hdev)
1081 {
1082 if (!lmp_le_capable(hdev))
1083 return MGMT_STATUS_NOT_SUPPORTED;
1084 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1085 return MGMT_STATUS_REJECTED;
1086 else
1087 return MGMT_STATUS_SUCCESS;
1088 }
1089
/* HCI request completion handler for Set Discoverable: commit the
 * discoverable flag, arm the discoverable timeout, reply to the
 * pending command and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited mode set optimistically in
		 * set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout now that discoverable mode is active. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1146
/* Handler for the Set Discoverable mgmt command.
 *
 * cp->val selects the mode: 0x00 disable, 0x01 general discoverable,
 * 0x02 limited discoverable. cp->timeout (seconds) must be zero when
 * disabling and non-zero when requesting limited discoverable. On
 * success the reply is sent either directly (no HCI traffic needed)
 * or from set_discoverable_complete() once the queued HCI request
 * finishes. Returns 0 or a negative errno.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of BR/EDR or LE must be enabled for discoverable
	 * to make sense.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A discoverable timeout can only be armed on a powered
	 * controller.
	 */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* A non-connectable device cannot be made discoverable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1311
1312 static void write_fast_connectable(struct hci_request *req, bool enable)
1313 {
1314 struct hci_dev *hdev = req->hdev;
1315 struct hci_cp_write_page_scan_activity acp;
1316 u8 type;
1317
1318 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1319 return;
1320
1321 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1322 return;
1323
1324 if (enable) {
1325 type = PAGE_SCAN_TYPE_INTERLACED;
1326
1327 /* 160 msec page scan interval */
1328 acp.interval = __constant_cpu_to_le16(0x0100);
1329 } else {
1330 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1331
1332 /* default 1.28 sec page scan */
1333 acp.interval = __constant_cpu_to_le16(0x0800);
1334 }
1335
1336 acp.window = __constant_cpu_to_le16(0x0012);
1337
1338 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1339 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1340 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1341 sizeof(acp), &acp);
1342
1343 if (hdev->page_scan_type != type)
1344 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1345 }
1346
1347 static u8 get_adv_type(struct hci_dev *hdev)
1348 {
1349 struct pending_cmd *cmd;
1350 bool connectable;
1351
1352 /* If there's a pending mgmt command the flag will not yet have
1353 * it's final value, so check for this first.
1354 */
1355 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1356 if (cmd) {
1357 struct mgmt_mode *cp = cmd->param;
1358 connectable = !!cp->val;
1359 } else {
1360 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1361 }
1362
1363 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1364 }
1365
1366 static void enable_advertising(struct hci_request *req)
1367 {
1368 struct hci_dev *hdev = req->hdev;
1369 struct hci_cp_le_set_adv_param cp;
1370 u8 enable = 0x01;
1371
1372 memset(&cp, 0, sizeof(cp));
1373 cp.min_interval = __constant_cpu_to_le16(0x0800);
1374 cp.max_interval = __constant_cpu_to_le16(0x0800);
1375 cp.type = get_adv_type(hdev);
1376 cp.own_address_type = hdev->own_addr_type;
1377 cp.channel_map = 0x07;
1378
1379 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1380
1381 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1382 }
1383
1384 static void disable_advertising(struct hci_request *req)
1385 {
1386 u8 enable = 0x00;
1387
1388 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1389 }
1390
/* Completion callback for the HCI request queued by set_connectable().
 *
 * Syncs HCI_CONNECTABLE with the value stored in the pending command,
 * replies to the mgmt socket and emits New Settings if the flag
 * actually changed. On HCI failure, a status-only error is sent
 * instead and the flag is left untouched.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1428
1429 static int set_connectable_update_settings(struct hci_dev *hdev,
1430 struct sock *sk, u8 val)
1431 {
1432 bool changed = false;
1433 int err;
1434
1435 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1436 changed = true;
1437
1438 if (val) {
1439 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1440 } else {
1441 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1442 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1443 }
1444
1445 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1446 if (err < 0)
1447 return err;
1448
1449 if (changed)
1450 return new_settings(hdev, sk);
1451
1452 return 0;
1453 }
1454
/* Handler for the Set Connectable mgmt command.
 *
 * When the controller is powered, assembles an HCI request updating
 * page scan (BR/EDR), advertising data (LE-only) and, if needed, the
 * advertising parameters; the mgmt reply is then sent from
 * set_connectable_complete(). When powered off, only the stored
 * setting is updated. Returns 0 or a negative errno.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags need to change. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Disabling page scan while a discoverable
			 * timeout is armed: stop the timeout as well.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so its type reflects the new connectable
	 * state, but only when no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means the request was empty; fall back to a
		 * pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1549
1550 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1551 u16 len)
1552 {
1553 struct mgmt_mode *cp = data;
1554 bool changed;
1555 int err;
1556
1557 BT_DBG("request for %s", hdev->name);
1558
1559 if (cp->val != 0x00 && cp->val != 0x01)
1560 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1561 MGMT_STATUS_INVALID_PARAMS);
1562
1563 hci_dev_lock(hdev);
1564
1565 if (cp->val)
1566 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1567 else
1568 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1569
1570 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1571 if (err < 0)
1572 goto unlock;
1573
1574 if (changed)
1575 err = new_settings(hdev, sk);
1576
1577 unlock:
1578 hci_dev_unlock(hdev);
1579 return err;
1580 }
1581
/* Handler for the Set Link Security mgmt command.
 *
 * Toggles BR/EDR link level security (HCI authentication enable).
 * When powered, sends HCI_OP_WRITE_AUTH_ENABLE and replies from the
 * command-complete path; when powered off, only the stored flag is
 * updated. Returns 0 or a negative errno.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just record the setting, no HCI needed. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: respond
	 * without sending any HCI command.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1651
/* Handler for the Set Secure Simple Pairing mgmt command.
 *
 * Sends HCI_OP_WRITE_SSP_MODE when the controller is powered and the
 * stored flag does not already match; otherwise only the stored flags
 * are updated. Disabling SSP also clears High Speed, since set_hs()
 * rejects HS without SSP enabled. Returns 0 or a negative errno.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: update the stored flags only. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Turning SSP off also turns HS off; "changed"
			 * is true if either bit was set before.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: no HCI traffic needed. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1729
/* Handler for the Set High Speed mgmt command.
 *
 * High Speed is a host-side flag (no HCI traffic); it requires SSP to
 * be enabled. Disabling HS on a powered controller is rejected.
 * Returns 0 or a negative errno.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1780
/* Completion callback for the HCI request queued by set_le().
 *
 * Replies to all pending Set LE commands (error status or settings),
 * emits New Settings, and refreshes the advertising and scan response
 * data when LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp stores a referenced socket in match.sk; drop it. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1818
/* Handler for the Set Low Energy mgmt command.
 *
 * Enables or disables host LE support via
 * HCI_OP_WRITE_LE_HOST_SUPPORTED; replies are sent from
 * le_enable_complete(). When powered off, or when the host LE state
 * already matches, only the stored flags are updated. Returns 0 or a
 * negative errno.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI command needed if powered off or if the controller
	 * host LE state already matches the requested value.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implies disabling advertising too. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before disabling host LE support. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1907
1908 /* This is a helper function to test for pending mgmt commands that can
1909 * cause CoD or EIR HCI commands. We can only allow one such pending
1910 * mgmt command at a time since otherwise we cannot easily track what
1911 * the current values are, will be, and based on that calculate if a new
1912 * HCI command needs to be sent and if yes with what value.
1913 */
1914 static bool pending_eir_or_class(struct hci_dev *hdev)
1915 {
1916 struct pending_cmd *cmd;
1917
1918 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1919 switch (cmd->opcode) {
1920 case MGMT_OP_ADD_UUID:
1921 case MGMT_OP_REMOVE_UUID:
1922 case MGMT_OP_SET_DEV_CLASS:
1923 case MGMT_OP_SET_POWERED:
1924 return true;
1925 }
1926 }
1927
1928 return false;
1929 }
1930
1931 static const u8 bluetooth_base_uuid[] = {
1932 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1933 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1934 };
1935
1936 static u8 get_uuid_size(const u8 *uuid)
1937 {
1938 u32 val;
1939
1940 if (memcmp(uuid, bluetooth_base_uuid, 12))
1941 return 128;
1942
1943 val = get_unaligned_le32(&uuid[12]);
1944 if (val > 0xffff)
1945 return 32;
1946
1947 return 16;
1948 }
1949
1950 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1951 {
1952 struct pending_cmd *cmd;
1953
1954 hci_dev_lock(hdev);
1955
1956 cmd = mgmt_pending_find(mgmt_op, hdev);
1957 if (!cmd)
1958 goto unlock;
1959
1960 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1961 hdev->dev_class, 3);
1962
1963 mgmt_pending_remove(cmd);
1964
1965 unlock:
1966 hci_dev_unlock(hdev);
1967 }
1968
/* HCI request callback for add_uuid(): forward to the shared class
 * completion helper.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1975
/* Handler for the Add UUID mgmt command.
 *
 * Stores the UUID in hdev->uuids and queues class-of-device and EIR
 * updates; the reply is sent from add_uuid_complete() unless the
 * request turns out to be empty (-ENODATA, i.e. nothing needed
 * updating), in which case it is sent directly. Returns 0 or a
 * negative errno.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Empty request: nothing to update, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2033
2034 static bool enable_service_cache(struct hci_dev *hdev)
2035 {
2036 if (!hdev_is_powered(hdev))
2037 return false;
2038
2039 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2040 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2041 CACHE_TIMEOUT);
2042 return true;
2043 }
2044
2045 return false;
2046 }
2047
/* HCI request callback for remove_uuid(): forward to the shared class
 * completion helper.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2054
/* Handler for the Remove UUID mgmt command.
 *
 * Removes the matching UUID(s) from hdev->uuids — or all of them when
 * the all-zero wildcard UUID is given — and queues class-of-device
 * and EIR updates. The reply is sent from remove_uuid_complete()
 * unless no HCI commands were needed. Returns 0 or a negative errno.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: wipe every stored UUID. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		err = hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the actual
		 * class/EIR update is deferred; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to update, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2132
/* HCI request callback for set_dev_class(): forward to the shared
 * class completion helper.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2139
/* Handler for the Set Device Class mgmt command.
 *
 * Stores the new major/minor class and, when powered, queues the
 * class-of-device (and possibly EIR) HCI updates; the reply is then
 * sent from set_class_complete(). Returns 0 or a negative errno.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR-affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low minor bits and three high major bits are reserved
	 * in the Class of Device encoding and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* NOTE(review): the lock is dropped around the synchronous
	 * cancel — presumably because the service cache work item takes
	 * hci_dev_lock itself; confirm before changing this ordering.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to update, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2210
2211 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2212 u16 len)
2213 {
2214 struct mgmt_cp_load_link_keys *cp = data;
2215 u16 key_count, expected_len;
2216 bool changed;
2217 int i;
2218
2219 BT_DBG("request for %s", hdev->name);
2220
2221 if (!lmp_bredr_capable(hdev))
2222 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2223 MGMT_STATUS_NOT_SUPPORTED);
2224
2225 key_count = __le16_to_cpu(cp->key_count);
2226
2227 expected_len = sizeof(*cp) + key_count *
2228 sizeof(struct mgmt_link_key_info);
2229 if (expected_len != len) {
2230 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2231 len, expected_len);
2232 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2233 MGMT_STATUS_INVALID_PARAMS);
2234 }
2235
2236 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2237 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2238 MGMT_STATUS_INVALID_PARAMS);
2239
2240 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2241 key_count);
2242
2243 for (i = 0; i < key_count; i++) {
2244 struct mgmt_link_key_info *key = &cp->keys[i];
2245
2246 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2247 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2248 MGMT_STATUS_INVALID_PARAMS);
2249 }
2250
2251 hci_dev_lock(hdev);
2252
2253 hci_link_keys_clear(hdev);
2254
2255 if (cp->debug_keys)
2256 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2257 else
2258 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2259
2260 if (changed)
2261 new_settings(hdev, NULL);
2262
2263 for (i = 0; i < key_count; i++) {
2264 struct mgmt_link_key_info *key = &cp->keys[i];
2265
2266 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2267 key->type, key->pin_len);
2268 }
2269
2270 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2271
2272 hci_dev_unlock(hdev);
2273
2274 return 0;
2275 }
2276
2277 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2278 u8 addr_type, struct sock *skip_sk)
2279 {
2280 struct mgmt_ev_device_unpaired ev;
2281
2282 bacpy(&ev.addr.bdaddr, bdaddr);
2283 ev.addr.type = addr_type;
2284
2285 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2286 skip_sk);
2287 }
2288
2289 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2290 u16 len)
2291 {
2292 struct mgmt_cp_unpair_device *cp = data;
2293 struct mgmt_rp_unpair_device rp;
2294 struct hci_cp_disconnect dc;
2295 struct pending_cmd *cmd;
2296 struct hci_conn *conn;
2297 int err;
2298
2299 memset(&rp, 0, sizeof(rp));
2300 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2301 rp.addr.type = cp->addr.type;
2302
2303 if (!bdaddr_type_is_valid(cp->addr.type))
2304 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2305 MGMT_STATUS_INVALID_PARAMS,
2306 &rp, sizeof(rp));
2307
2308 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2309 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2310 MGMT_STATUS_INVALID_PARAMS,
2311 &rp, sizeof(rp));
2312
2313 hci_dev_lock(hdev);
2314
2315 if (!hdev_is_powered(hdev)) {
2316 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2317 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2318 goto unlock;
2319 }
2320
2321 if (cp->addr.type == BDADDR_BREDR) {
2322 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2323 } else {
2324 u8 addr_type;
2325
2326 if (cp->addr.type == BDADDR_LE_PUBLIC)
2327 addr_type = ADDR_LE_DEV_PUBLIC;
2328 else
2329 addr_type = ADDR_LE_DEV_RANDOM;
2330
2331 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2332 }
2333
2334 if (err < 0) {
2335 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2336 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2337 goto unlock;
2338 }
2339
2340 if (cp->disconnect) {
2341 if (cp->addr.type == BDADDR_BREDR)
2342 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2343 &cp->addr.bdaddr);
2344 else
2345 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2346 &cp->addr.bdaddr);
2347 } else {
2348 conn = NULL;
2349 }
2350
2351 if (!conn) {
2352 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2353 &rp, sizeof(rp));
2354 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2355 goto unlock;
2356 }
2357
2358 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2359 sizeof(*cp));
2360 if (!cmd) {
2361 err = -ENOMEM;
2362 goto unlock;
2363 }
2364
2365 dc.handle = cpu_to_le16(conn->handle);
2366 dc.reason = 0x13; /* Remote User Terminated Connection */
2367 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2368 if (err < 0)
2369 mgmt_pending_remove(cmd);
2370
2371 unlock:
2372 hci_dev_unlock(hdev);
2373 return err;
2374 }
2375
/* Handler for the Disconnect mgmt command.
 *
 * Looks up the ACL (BR/EDR) or LE connection for the given address
 * and sends HCI_OP_DISCONNECT with reason "remote user terminated";
 * the mgmt reply is completed when the disconnection event arrives.
 * Returns 0 or a negative errno.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2440
2441 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2442 {
2443 switch (link_type) {
2444 case LE_LINK:
2445 switch (addr_type) {
2446 case ADDR_LE_DEV_PUBLIC:
2447 return BDADDR_LE_PUBLIC;
2448
2449 default:
2450 /* Fallback to LE Random address type */
2451 return BDADDR_LE_RANDOM;
2452 }
2453
2454 default:
2455 /* Fallback to BR/EDR type */
2456 return BDADDR_BREDR;
2457 }
2458 }
2459
2460 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2461 u16 data_len)
2462 {
2463 struct mgmt_rp_get_connections *rp;
2464 struct hci_conn *c;
2465 size_t rp_len;
2466 int err;
2467 u16 i;
2468
2469 BT_DBG("");
2470
2471 hci_dev_lock(hdev);
2472
2473 if (!hdev_is_powered(hdev)) {
2474 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2475 MGMT_STATUS_NOT_POWERED);
2476 goto unlock;
2477 }
2478
2479 i = 0;
2480 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2481 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2482 i++;
2483 }
2484
2485 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2486 rp = kmalloc(rp_len, GFP_KERNEL);
2487 if (!rp) {
2488 err = -ENOMEM;
2489 goto unlock;
2490 }
2491
2492 i = 0;
2493 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2494 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2495 continue;
2496 bacpy(&rp->addr[i].bdaddr, &c->dst);
2497 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2498 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2499 continue;
2500 i++;
2501 }
2502
2503 rp->conn_count = cpu_to_le16(i);
2504
2505 /* Recalculate length in case of filtered SCO connections, etc */
2506 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2507
2508 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2509 rp_len);
2510
2511 kfree(rp);
2512
2513 unlock:
2514 hci_dev_unlock(hdev);
2515 return err;
2516 }
2517
2518 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2519 struct mgmt_cp_pin_code_neg_reply *cp)
2520 {
2521 struct pending_cmd *cmd;
2522 int err;
2523
2524 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2525 sizeof(*cp));
2526 if (!cmd)
2527 return -ENOMEM;
2528
2529 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2530 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2531 if (err < 0)
2532 mgmt_pending_remove(cmd);
2533
2534 return err;
2535 }
2536
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for a legacy-pairing BR/EDR connection. If high
 * security was requested but the PIN is not a full 16 bytes, the
 * pairing is rejected with a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only exists for BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; anything shorter is
	 * rejected by sending a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2596
2597 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2598 u16 len)
2599 {
2600 struct mgmt_cp_set_io_capability *cp = data;
2601
2602 BT_DBG("");
2603
2604 hci_dev_lock(hdev);
2605
2606 hdev->io_capability = cp->io_capability;
2607
2608 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2609 hdev->io_capability);
2610
2611 hci_dev_unlock(hdev);
2612
2613 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2614 0);
2615 }
2616
2617 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2618 {
2619 struct hci_dev *hdev = conn->hdev;
2620 struct pending_cmd *cmd;
2621
2622 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2623 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2624 continue;
2625
2626 if (cmd->user_data != conn)
2627 continue;
2628
2629 return cmd;
2630 }
2631
2632 return NULL;
2633 }
2634
/* Finish a pending MGMT_OP_PAIR_DEVICE command with the given mgmt
 * status: send the response, detach the connection callbacks, drop the
 * connection reference taken when pairing started, and free the
 * pending command entry.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2655
2656 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2657 {
2658 struct pending_cmd *cmd;
2659
2660 BT_DBG("status %u", status);
2661
2662 cmd = find_pairing(conn);
2663 if (!cmd)
2664 BT_DBG("Unable to find a pending command");
2665 else
2666 pairing_complete(cmd, mgmt_status(status));
2667 }
2668
2669 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2670 {
2671 struct pending_cmd *cmd;
2672
2673 BT_DBG("status %u", status);
2674
2675 if (!status)
2676 return;
2677
2678 cmd = find_pairing(conn);
2679 if (!cmd)
2680 BT_DBG("Unable to find a pending command");
2681 else
2682 pairing_complete(cmd, mgmt_status(status));
2683 }
2684
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a
 * remote device. Creates (or reuses) the BR/EDR or LE connection,
 * installs pairing callbacks on it, and leaves the mgmt command
 * pending until one of those callbacks fires.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* All responses echo the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 (NoInputNoOutput) cannot provide MITM protection */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A set connect_cfm_cb means some other pairing/procedure owns
	 * this connection already.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR)
		conn->connect_cfm_cb = pairing_complete_cb;
	else
		conn->connect_cfm_cb = le_connect_complete_cb;

	conn->security_cfm_cb = pairing_complete_cb;
	conn->disconn_cfm_cb = pairing_complete_cb;
	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2776
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress
 * MGMT_OP_PAIR_DEVICE for the given address. The pending pairing is
 * completed with MGMT_STATUS_CANCELLED and this command replies with
 * the echoed address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* NOTE(review): only the bdaddr is compared here; addr->type is
	 * never checked against the pending pairing's address type —
	 * confirm whether a type mismatch should also be rejected.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2818
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm, passkey and their negatives). LE pairing responses are
 * routed through SMP and complete immediately; BR/EDR responses are
 * forwarded as the given HCI command and left pending.
 *
 * @mgmt_op: mgmt opcode used for the reply to user space
 * @hci_op:  HCI opcode to issue for BR/EDR connections
 * @passkey: only used when hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2888
2889 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2890 void *data, u16 len)
2891 {
2892 struct mgmt_cp_pin_code_neg_reply *cp = data;
2893
2894 BT_DBG("");
2895
2896 return user_pairing_resp(sk, hdev, &cp->addr,
2897 MGMT_OP_PIN_CODE_NEG_REPLY,
2898 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2899 }
2900
2901 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2902 u16 len)
2903 {
2904 struct mgmt_cp_user_confirm_reply *cp = data;
2905
2906 BT_DBG("");
2907
2908 if (len != sizeof(*cp))
2909 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2910 MGMT_STATUS_INVALID_PARAMS);
2911
2912 return user_pairing_resp(sk, hdev, &cp->addr,
2913 MGMT_OP_USER_CONFIRM_REPLY,
2914 HCI_OP_USER_CONFIRM_REPLY, 0);
2915 }
2916
2917 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2918 void *data, u16 len)
2919 {
2920 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2921
2922 BT_DBG("");
2923
2924 return user_pairing_resp(sk, hdev, &cp->addr,
2925 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2926 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2927 }
2928
2929 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2930 u16 len)
2931 {
2932 struct mgmt_cp_user_passkey_reply *cp = data;
2933
2934 BT_DBG("");
2935
2936 return user_pairing_resp(sk, hdev, &cp->addr,
2937 MGMT_OP_USER_PASSKEY_REPLY,
2938 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2939 }
2940
2941 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2942 void *data, u16 len)
2943 {
2944 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2945
2946 BT_DBG("");
2947
2948 return user_pairing_resp(sk, hdev, &cp->addr,
2949 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2950 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2951 }
2952
2953 static void update_name(struct hci_request *req)
2954 {
2955 struct hci_dev *hdev = req->hdev;
2956 struct hci_cp_write_local_name cp;
2957
2958 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2959
2960 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2961 }
2962
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: resolve
 * the pending mgmt command with either the HCI error or the echoed
 * name parameters.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2990
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's full and short
 * names. When the controller is powered, the new name is also written
 * to the controller (EIR and/or scan response data); otherwise only
 * the in-kernel state changes and an event is emitted directly.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name never reaches the controller directly, so it
	 * can be stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets about the change */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3059
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for local
 * out-of-band pairing data. Uses the extended variant when Secure
 * Connections is enabled. The reply is delivered when the HCI command
 * completes, so the mgmt command stays pending here.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is an SSP feature */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Secure Connections needs both P-192 and P-256 data */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3107
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * received for a remote device. The payload size distinguishes the
 * legacy (P-192 only) format from the extended (P-192 + P-256) one.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known layout: reject as malformed */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3155
3156 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3157 void *data, u16 len)
3158 {
3159 struct mgmt_cp_remove_remote_oob_data *cp = data;
3160 u8 status;
3161 int err;
3162
3163 BT_DBG("%s", hdev->name);
3164
3165 hci_dev_lock(hdev);
3166
3167 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3168 if (err < 0)
3169 status = MGMT_STATUS_INVALID_PARAMS;
3170 else
3171 status = MGMT_STATUS_SUCCESS;
3172
3173 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3174 status, &cp->addr, sizeof(cp->addr));
3175
3176 hci_dev_unlock(hdev);
3177 return err;
3178 }
3179
3180 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3181 {
3182 struct pending_cmd *cmd;
3183 u8 type;
3184 int err;
3185
3186 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3187
3188 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3189 if (!cmd)
3190 return -ENOENT;
3191
3192 type = hdev->discovery.type;
3193
3194 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3195 &type, sizeof(type));
3196 mgmt_pending_remove(cmd);
3197
3198 return err;
3199 }
3200
3201 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3202 {
3203 BT_DBG("status %d", status);
3204
3205 if (status) {
3206 hci_dev_lock(hdev);
3207 mgmt_start_discovery_failed(hdev, status);
3208 hci_dev_unlock(hdev);
3209 return;
3210 }
3211
3212 hci_dev_lock(hdev);
3213 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3214 hci_dev_unlock(hdev);
3215
3216 switch (hdev->discovery.type) {
3217 case DISCOV_TYPE_LE:
3218 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3219 DISCOV_LE_TIMEOUT);
3220 break;
3221
3222 case DISCOV_TYPE_INTERLEAVED:
3223 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3224 DISCOV_INTERLEAVED_TIMEOUT);
3225 break;
3226
3227 case DISCOV_TYPE_BREDR:
3228 break;
3229
3230 default:
3231 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3232 }
3233 }
3234
/* MGMT_OP_START_DISCOVERY handler: kick off device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or both interleaved). The
 * command stays pending until start_discovery_complete() runs.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry owns the inquiry machinery already */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start from a clean result cache */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is not allowed here */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active scan so we also get scan response data */
		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = hdev->own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3370
3371 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3372 {
3373 struct pending_cmd *cmd;
3374 int err;
3375
3376 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3377 if (!cmd)
3378 return -ENOENT;
3379
3380 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3381 &hdev->discovery.type, sizeof(hdev->discovery.type));
3382 mgmt_pending_remove(cmd);
3383
3384 return err;
3385 }
3386
3387 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3388 {
3389 BT_DBG("status %d", status);
3390
3391 hci_dev_lock(hdev);
3392
3393 if (status) {
3394 mgmt_stop_discovery_failed(hdev, status);
3395 goto unlock;
3396 }
3397
3398 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3399
3400 unlock:
3401 hci_dev_unlock(hdev);
3402 }
3403
/* MGMT_OP_STOP_DISCOVERY handler: abort an active discovery of the
 * requested type. What gets cancelled depends on the discovery state:
 * an ongoing inquiry or LE scan (FINDING) or a remote name request
 * (RESOLVING). The command stays pending until the cancel completes.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The caller must stop the same discovery type it started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY distinguishes BR/EDR inquiry from LE scan */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request in flight: nothing to cancel, done */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3495
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether the name
 * of a discovered device is already known. Known names are removed
 * from the resolve list; unknown ones are queued for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Only meaningful while discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3535
3536 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3537 u16 len)
3538 {
3539 struct mgmt_cp_block_device *cp = data;
3540 u8 status;
3541 int err;
3542
3543 BT_DBG("%s", hdev->name);
3544
3545 if (!bdaddr_type_is_valid(cp->addr.type))
3546 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3547 MGMT_STATUS_INVALID_PARAMS,
3548 &cp->addr, sizeof(cp->addr));
3549
3550 hci_dev_lock(hdev);
3551
3552 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3553 if (err < 0)
3554 status = MGMT_STATUS_FAILED;
3555 else
3556 status = MGMT_STATUS_SUCCESS;
3557
3558 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3559 &cp->addr, sizeof(cp->addr));
3560
3561 hci_dev_unlock(hdev);
3562
3563 return err;
3564 }
3565
3566 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3567 u16 len)
3568 {
3569 struct mgmt_cp_unblock_device *cp = data;
3570 u8 status;
3571 int err;
3572
3573 BT_DBG("%s", hdev->name);
3574
3575 if (!bdaddr_type_is_valid(cp->addr.type))
3576 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3577 MGMT_STATUS_INVALID_PARAMS,
3578 &cp->addr, sizeof(cp->addr));
3579
3580 hci_dev_lock(hdev);
3581
3582 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3583 if (err < 0)
3584 status = MGMT_STATUS_INVALID_PARAMS;
3585 else
3586 status = MGMT_STATUS_SUCCESS;
3587
3588 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3589 &cp->addr, sizeof(cp->addr));
3590
3591 hci_dev_unlock(hdev);
3592
3593 return err;
3594 }
3595
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version) and refresh the EIR data so it gets
 * advertised. Source values above 0x0002 are not defined and rejected.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Push the updated EIR out; completion status is not tracked */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
3629
3630 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3631 {
3632 struct cmd_lookup match = { NULL, hdev };
3633
3634 if (status) {
3635 u8 mgmt_err = mgmt_status(status);
3636
3637 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3638 cmd_status_rsp, &mgmt_err);
3639 return;
3640 }
3641
3642 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3643 &match);
3644
3645 new_settings(hdev, match.sk);
3646
3647 if (match.sk)
3648 sock_put(match.sk);
3649 }
3650
/* Handle the Set Advertising mgmt command: toggle the HCI_ADVERTISING
 * flag and, when the controller is powered and has no LE connections,
 * issue the matching HCI advertising enable/disable request.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support on the controller */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands that touch LE advertising */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	/* The mgmt response is sent from set_advertising_complete() */
	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3728
3729 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3730 void *data, u16 len)
3731 {
3732 struct mgmt_cp_set_static_address *cp = data;
3733 int err;
3734
3735 BT_DBG("%s", hdev->name);
3736
3737 if (!lmp_le_capable(hdev))
3738 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3739 MGMT_STATUS_NOT_SUPPORTED);
3740
3741 if (hdev_is_powered(hdev))
3742 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3743 MGMT_STATUS_REJECTED);
3744
3745 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3746 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3747 return cmd_status(sk, hdev->id,
3748 MGMT_OP_SET_STATIC_ADDRESS,
3749 MGMT_STATUS_INVALID_PARAMS);
3750
3751 /* Two most significant bits shall be set */
3752 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3753 return cmd_status(sk, hdev->id,
3754 MGMT_OP_SET_STATIC_ADDRESS,
3755 MGMT_STATUS_INVALID_PARAMS);
3756 }
3757
3758 hci_dev_lock(hdev);
3759
3760 bacpy(&hdev->static_addr, &cp->bdaddr);
3761
3762 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3763
3764 hci_dev_unlock(hdev);
3765
3766 return err;
3767 }
3768
3769 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3770 void *data, u16 len)
3771 {
3772 struct mgmt_cp_set_scan_params *cp = data;
3773 __u16 interval, window;
3774 int err;
3775
3776 BT_DBG("%s", hdev->name);
3777
3778 if (!lmp_le_capable(hdev))
3779 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3780 MGMT_STATUS_NOT_SUPPORTED);
3781
3782 interval = __le16_to_cpu(cp->interval);
3783
3784 if (interval < 0x0004 || interval > 0x4000)
3785 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3786 MGMT_STATUS_INVALID_PARAMS);
3787
3788 window = __le16_to_cpu(cp->window);
3789
3790 if (window < 0x0004 || window > 0x4000)
3791 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3792 MGMT_STATUS_INVALID_PARAMS);
3793
3794 if (window > interval)
3795 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3796 MGMT_STATUS_INVALID_PARAMS);
3797
3798 hci_dev_lock(hdev);
3799
3800 hdev->le_scan_interval = interval;
3801 hdev->le_scan_window = window;
3802
3803 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3804
3805 hci_dev_unlock(hdev);
3806
3807 return err;
3808 }
3809
/* Request completion callback for Set Fast Connectable: commit the
 * HCI_FAST_CONNECTABLE flag on success and answer the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only commit the flag once the controller accepted the
		 * new page scan parameters.
		 */
		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3842
/* Handle the Set Fast Connectable mgmt command: adjust the BR/EDR page
 * scan parameters for faster connection establishment. Only valid on a
 * powered, connectable controller with HCI version >= 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Page scan tuning needs BR/EDR and was introduced with 1.2 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just send the settings reply */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	/* Flag update and reply happen in fast_connectable_complete() */
	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3907
3908 static void set_bredr_scan(struct hci_request *req)
3909 {
3910 struct hci_dev *hdev = req->hdev;
3911 u8 scan = 0;
3912
3913 /* Ensure that fast connectable is disabled. This function will
3914 * not do anything if the page scan parameters are already what
3915 * they should be.
3916 */
3917 write_fast_connectable(req, false);
3918
3919 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3920 scan |= SCAN_PAGE;
3921 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3922 scan |= SCAN_INQUIRY;
3923
3924 if (scan)
3925 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3926 }
3927
/* Request completion callback for Set BR/EDR: report the result to the
 * socket that issued the command.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3959
/* Handle the Set BR/EDR mgmt command: enable or disable BR/EDR support
 * on a dual-mode (BR/EDR + LE) controller. Disabling is only allowed
 * while powered off.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* While powered off only the flags are toggled; the
		 * BR/EDR-specific settings are cleared when disabling.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* set_bredr_complete() restores the flag on HCI failure */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4049
/* Handle the Set Secure Connections mgmt command. val may be 0x00
 * (off), 0x01 (on) or 0x02 (Secure Connections Only mode).
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows enabling SC even when the controller does
	 * not report LMP support for it.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* While powered off only update the flags; they take
		 * effect on the controller at power-on.
		 */
		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: reply without HCI traffic */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is host policy: tracked as a flag only, not sent
	 * to the controller.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4137
4138 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4139 void *data, u16 len)
4140 {
4141 struct mgmt_mode *cp = data;
4142 bool changed;
4143 int err;
4144
4145 BT_DBG("request for %s", hdev->name);
4146
4147 if (cp->val != 0x00 && cp->val != 0x01)
4148 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4149 MGMT_STATUS_INVALID_PARAMS);
4150
4151 hci_dev_lock(hdev);
4152
4153 if (cp->val)
4154 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4155 else
4156 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4157
4158 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4159 if (err < 0)
4160 goto unlock;
4161
4162 if (changed)
4163 err = new_settings(hdev, sk);
4164
4165 unlock:
4166 hci_dev_unlock(hdev);
4167 return err;
4168 }
4169
4170 static bool irk_is_valid(struct mgmt_irk_info *irk)
4171 {
4172 switch (irk->addr.type) {
4173 case BDADDR_LE_PUBLIC:
4174 return true;
4175
4176 case BDADDR_LE_RANDOM:
4177 /* Two most significant bits shall be set */
4178 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4179 return false;
4180 return true;
4181 }
4182
4183 return false;
4184 }
4185
4186 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4187 u16 len)
4188 {
4189 struct mgmt_cp_load_irks *cp = cp_data;
4190 u16 irk_count, expected_len;
4191 int i, err;
4192
4193 BT_DBG("request for %s", hdev->name);
4194
4195 if (!lmp_le_capable(hdev))
4196 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4197 MGMT_STATUS_NOT_SUPPORTED);
4198
4199 irk_count = __le16_to_cpu(cp->irk_count);
4200
4201 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4202 if (expected_len != len) {
4203 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4204 len, expected_len);
4205 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4206 MGMT_STATUS_INVALID_PARAMS);
4207 }
4208
4209 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4210
4211 for (i = 0; i < irk_count; i++) {
4212 struct mgmt_irk_info *key = &cp->irks[i];
4213
4214 if (!irk_is_valid(key))
4215 return cmd_status(sk, hdev->id,
4216 MGMT_OP_LOAD_IRKS,
4217 MGMT_STATUS_INVALID_PARAMS);
4218 }
4219
4220 hci_dev_lock(hdev);
4221
4222 hci_smp_irks_clear(hdev);
4223
4224 for (i = 0; i < irk_count; i++) {
4225 struct mgmt_irk_info *irk = &cp->irks[i];
4226 u8 addr_type;
4227
4228 if (irk->addr.type == BDADDR_LE_PUBLIC)
4229 addr_type = ADDR_LE_DEV_PUBLIC;
4230 else
4231 addr_type = ADDR_LE_DEV_RANDOM;
4232
4233 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4234 BDADDR_ANY);
4235 }
4236
4237 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4238
4239 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4240
4241 hci_dev_unlock(hdev);
4242
4243 return err;
4244 }
4245
4246 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4247 {
4248 if (key->master != 0x00 && key->master != 0x01)
4249 return false;
4250
4251 switch (key->addr.type) {
4252 case BDADDR_LE_PUBLIC:
4253 return true;
4254
4255 case BDADDR_LE_RANDOM:
4256 /* Two most significant bits shall be set */
4257 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4258 return false;
4259 return true;
4260 }
4261
4262 return false;
4263 }
4264
4265 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4266 void *cp_data, u16 len)
4267 {
4268 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4269 u16 key_count, expected_len;
4270 int i, err;
4271
4272 BT_DBG("request for %s", hdev->name);
4273
4274 if (!lmp_le_capable(hdev))
4275 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4276 MGMT_STATUS_NOT_SUPPORTED);
4277
4278 key_count = __le16_to_cpu(cp->key_count);
4279
4280 expected_len = sizeof(*cp) + key_count *
4281 sizeof(struct mgmt_ltk_info);
4282 if (expected_len != len) {
4283 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4284 len, expected_len);
4285 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4286 MGMT_STATUS_INVALID_PARAMS);
4287 }
4288
4289 BT_DBG("%s key_count %u", hdev->name, key_count);
4290
4291 for (i = 0; i < key_count; i++) {
4292 struct mgmt_ltk_info *key = &cp->keys[i];
4293
4294 if (!ltk_is_valid(key))
4295 return cmd_status(sk, hdev->id,
4296 MGMT_OP_LOAD_LONG_TERM_KEYS,
4297 MGMT_STATUS_INVALID_PARAMS);
4298 }
4299
4300 hci_dev_lock(hdev);
4301
4302 hci_smp_ltks_clear(hdev);
4303
4304 for (i = 0; i < key_count; i++) {
4305 struct mgmt_ltk_info *key = &cp->keys[i];
4306 u8 type, addr_type;
4307
4308 if (key->addr.type == BDADDR_LE_PUBLIC)
4309 addr_type = ADDR_LE_DEV_PUBLIC;
4310 else
4311 addr_type = ADDR_LE_DEV_RANDOM;
4312
4313 if (key->master)
4314 type = HCI_SMP_LTK;
4315 else
4316 type = HCI_SMP_LTK_SLAVE;
4317
4318 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
4319 type, 0, key->type, key->val,
4320 key->enc_size, key->ediv, key->rand);
4321 }
4322
4323 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4324 NULL, 0);
4325
4326 hci_dev_unlock(hdev);
4327
4328 return err;
4329 }
4330
/* Dispatch table for mgmt commands, indexed by mgmt opcode. For var_len
 * commands data_len is the minimum payload size; otherwise the payload
 * must match data_len exactly (both checked in mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* payload may exceed data_len */
	size_t data_len;	/* expected (or minimum) payload size */
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, false, MGMT_READ_VERSION_SIZE },
	{ read_commands, false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info, false, MGMT_READ_INFO_SIZE },
	{ set_powered, false, MGMT_SETTING_SIZE },
	{ set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, false, MGMT_SETTING_SIZE },
	{ set_fast_connectable, false, MGMT_SETTING_SIZE },
	{ set_pairable, false, MGMT_SETTING_SIZE },
	{ set_link_security, false, MGMT_SETTING_SIZE },
	{ set_ssp, false, MGMT_SETTING_SIZE },
	{ set_hs, false, MGMT_SETTING_SIZE },
	{ set_le, false, MGMT_SETTING_SIZE },
	{ set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect, false, MGMT_DISCONNECT_SIZE },
	{ get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, false, MGMT_SETTING_SIZE },
	{ set_bredr, false, MGMT_SETTING_SIZE },
	{ set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, false, MGMT_SETTING_SIZE },
	{ set_debug_keys, false, MGMT_SETTING_SIZE },
	{ }, /* NOTE(review): empty slot for an opcode not handled here */
	{ load_irks, true, MGMT_LOAD_IRKS_SIZE },
};
4387
4388
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Parses the mgmt header, resolves the target controller, dispatches
 * via mgmt_handlers[] and returns the number of consumed bytes on
 * success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must account for the whole payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or bound to a user channel
		 * are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below READ_INFO are global and must not carry an
	 * index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len handlers treat data_len as a minimum, others as exact */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4481
4482 void mgmt_index_added(struct hci_dev *hdev)
4483 {
4484 if (hdev->dev_type != HCI_BREDR)
4485 return;
4486
4487 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4488 }
4489
4490 void mgmt_index_removed(struct hci_dev *hdev)
4491 {
4492 u8 status = MGMT_STATUS_INVALID_INDEX;
4493
4494 if (hdev->dev_type != HCI_BREDR)
4495 return;
4496
4497 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4498
4499 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4500 }
4501
/* Request completion callback for the power-on HCI request built in
 * powered_update_hci(): answer pending Set Powered commands and
 * broadcast the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp stored a held socket reference in match.sk */
	if (match.sk)
		sock_put(match.sk);
}
4519
/* Build and submit the HCI request that brings the controller's state
 * in line with the mgmt flags after power-on. Returns the result of
 * hci_req_run() (negative when nothing was queued).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP if requested but not yet active on the controller */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Set random address to static address if configured */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);

		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication-enable setting with the
	 * link security mgmt flag.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4584
/* Notify the mgmt layer that the controller's power state changed:
 * answer pending Set Powered commands and emit a New Settings event.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If HCI commands were queued, powered_complete() sends
		 * the responses once they finish.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering off: complete Set Powered commands, fail all others */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report the class of device as cleared while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4619
4620 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4621 {
4622 struct pending_cmd *cmd;
4623 u8 status;
4624
4625 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4626 if (!cmd)
4627 return;
4628
4629 if (err == -ERFKILL)
4630 status = MGMT_STATUS_RFKILLED;
4631 else
4632 status = MGMT_STATUS_FAILED;
4633
4634 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4635
4636 mgmt_pending_remove(cmd);
4637 }
4638
/* Discoverable timeout handler: clear the discoverable flags and bring
 * the controller's scan mode, class and advertising data back in line.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	/* Drop inquiry scan but keep page scan enabled on BR/EDR */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4669
/* Track a discoverable state change reported by the controller and
 * broadcast New Settings if the flag actually flipped.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot outlive discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4702
4703 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4704 {
4705 bool changed;
4706
4707 /* Nothing needed here if there's a pending command since that
4708 * commands request completion callback takes care of everything
4709 * necessary.
4710 */
4711 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4712 return;
4713
4714 if (connectable)
4715 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4716 else
4717 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4718
4719 if (changed)
4720 new_settings(hdev, NULL);
4721 }
4722
/* A Write Scan Enable HCI command failed: fail the mgmt commands that
 * requested the affected scan modes.
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
4735
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells user space whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event so no stack data leaks to user space */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
4752
/* Emit a New Long Term Key event for a freshly distributed LE LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero the whole event so no stack data leaks to user space */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* Flag master-role keys so user space can reload them correctly */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4774
4775 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4776 u8 data_len)
4777 {
4778 eir[eir_len++] = sizeof(type) + data_len;
4779 eir[eir_len++] = type;
4780 memcpy(&eir[eir_len], data, data_len);
4781 eir_len += data_len;
4782
4783 return eir_len;
4784 }
4785
/* Emit a Device Connected event to user space.
 *
 * The event carries optional EIR-formatted data: the remote name (if
 * name_len > 0) and the class of device (if provided and non-zero).
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is actually set */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the used part of buf: fixed header plus EIR data */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4812
4813 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4814 {
4815 struct mgmt_cp_disconnect *cp = cmd->param;
4816 struct sock **sk = data;
4817 struct mgmt_rp_disconnect rp;
4818
4819 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4820 rp.addr.type = cp->addr.type;
4821
4822 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4823 sizeof(rp));
4824
4825 *sk = cmd->sk;
4826 sock_hold(*sk);
4827
4828 mgmt_pending_remove(cmd);
4829 }
4830
4831 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
4832 {
4833 struct hci_dev *hdev = data;
4834 struct mgmt_cp_unpair_device *cp = cmd->param;
4835 struct mgmt_rp_unpair_device rp;
4836
4837 memset(&rp, 0, sizeof(rp));
4838 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4839 rp.addr.type = cp->addr.type;
4840
4841 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
4842
4843 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
4844
4845 mgmt_pending_remove(cmd);
4846 }
4847
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only ACL (BR/EDR) and LE links are reported to mgmt */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuing socket (with a reference
	 * held) in sk so it can be excluded from the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
4871
/* Handle a failed disconnect attempt: complete pending Unpair Device
 * commands, and if a pending Disconnect command matches the failing
 * address and type, complete it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different address or address type */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4903
4904 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4905 u8 addr_type, u8 status)
4906 {
4907 struct mgmt_ev_connect_failed ev;
4908
4909 bacpy(&ev.addr.bdaddr, bdaddr);
4910 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4911 ev.status = mgmt_status(status);
4912
4913 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
4914 }
4915
4916 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
4917 {
4918 struct mgmt_ev_pin_code_request ev;
4919
4920 bacpy(&ev.addr.bdaddr, bdaddr);
4921 ev.addr.type = BDADDR_BREDR;
4922 ev.secure = secure;
4923
4924 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
4925 }
4926
4927 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4928 u8 status)
4929 {
4930 struct pending_cmd *cmd;
4931 struct mgmt_rp_pin_code_reply rp;
4932
4933 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
4934 if (!cmd)
4935 return;
4936
4937 bacpy(&rp.addr.bdaddr, bdaddr);
4938 rp.addr.type = BDADDR_BREDR;
4939
4940 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
4941 mgmt_status(status), &rp, sizeof(rp));
4942
4943 mgmt_pending_remove(cmd);
4944 }
4945
4946 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4947 u8 status)
4948 {
4949 struct pending_cmd *cmd;
4950 struct mgmt_rp_pin_code_reply rp;
4951
4952 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
4953 if (!cmd)
4954 return;
4955
4956 bacpy(&rp.addr.bdaddr, bdaddr);
4957 rp.addr.type = BDADDR_BREDR;
4958
4959 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
4960 mgmt_status(status), &rp, sizeof(rp));
4961
4962 mgmt_pending_remove(cmd);
4963 }
4964
4965 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4966 u8 link_type, u8 addr_type, __le32 value,
4967 u8 confirm_hint)
4968 {
4969 struct mgmt_ev_user_confirm_request ev;
4970
4971 BT_DBG("%s", hdev->name);
4972
4973 bacpy(&ev.addr.bdaddr, bdaddr);
4974 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4975 ev.confirm_hint = confirm_hint;
4976 ev.value = value;
4977
4978 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
4979 NULL);
4980 }
4981
4982 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4983 u8 link_type, u8 addr_type)
4984 {
4985 struct mgmt_ev_user_passkey_request ev;
4986
4987 BT_DBG("%s", hdev->name);
4988
4989 bacpy(&ev.addr.bdaddr, bdaddr);
4990 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4991
4992 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4993 NULL);
4994 }
4995
/* Common completion handler for the user confirm/passkey (negative)
 * reply commands: find the pending command of the given opcode and
 * complete it with the translated HCI status.
 *
 * Returns 0 on success, -ENOENT if no such command is pending, or the
 * error returned by cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5017
5018 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5019 u8 link_type, u8 addr_type, u8 status)
5020 {
5021 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5022 status, MGMT_OP_USER_CONFIRM_REPLY);
5023 }
5024
5025 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5026 u8 link_type, u8 addr_type, u8 status)
5027 {
5028 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5029 status,
5030 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5031 }
5032
5033 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5034 u8 link_type, u8 addr_type, u8 status)
5035 {
5036 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5037 status, MGMT_OP_USER_PASSKEY_REPLY);
5038 }
5039
5040 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5041 u8 link_type, u8 addr_type, u8 status)
5042 {
5043 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5044 status,
5045 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5046 }
5047
5048 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5049 u8 link_type, u8 addr_type, u32 passkey,
5050 u8 entered)
5051 {
5052 struct mgmt_ev_passkey_notify ev;
5053
5054 BT_DBG("%s", hdev->name);
5055
5056 bacpy(&ev.addr.bdaddr, bdaddr);
5057 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5058 ev.passkey = __cpu_to_le32(passkey);
5059 ev.entered = entered;
5060
5061 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5062 }
5063
5064 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5065 u8 addr_type, u8 status)
5066 {
5067 struct mgmt_ev_auth_failed ev;
5068
5069 bacpy(&ev.addr.bdaddr, bdaddr);
5070 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5071 ev.status = mgmt_status(status);
5072
5073 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5074 }
5075
/* Handle completion of an authentication-enable update.
 *
 * On failure, fail all pending Set Link Security commands. On success,
 * sync HCI_LINK_SECURITY with the controller's HCI_AUTH state, complete
 * pending commands, and emit New Settings if the setting changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	/* settings_rsp also records a command socket in match.sk so it
	 * can be excluded from the New Settings event below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5104
5105 static void clear_eir(struct hci_request *req)
5106 {
5107 struct hci_dev *hdev = req->hdev;
5108 struct hci_cp_write_eir cp;
5109
5110 if (!lmp_ext_inq_capable(hdev))
5111 return;
5112
5113 memset(hdev->eir, 0, sizeof(hdev->eir));
5114
5115 memset(&cp, 0, sizeof(cp));
5116
5117 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5118 }
5119
/* Handle completion of a Simple Pairing mode update.
 *
 * Keeps HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED) flags in
 * sync with the controller, completes pending Set SSP commands, emits
 * New Settings when the flags changed, and refreshes the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed while the flag was already set,
		 * roll it (and High Speed) back and notify user space.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* High Speed is cleared along with SSP; if SSP was
		 * already off, clearing HS may be the only change.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Update or clear the EIR data to match the new SSP state */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5168
/* Handle completion of a Secure Connections support update.
 *
 * Keeps HCI_SC_ENABLED (and the dependent HCI_SC_ONLY) flags in sync
 * with the controller, completes pending Set Secure Connections
 * commands, and emits New Settings when the setting changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll the flags back if enabling failed after they
		 * had already been set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot remain active without SC */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5205
5206 static void sk_lookup(struct pending_cmd *cmd, void *data)
5207 {
5208 struct cmd_lookup *match = data;
5209
5210 if (match->sk == NULL) {
5211 match->sk = cmd->sk;
5212 sock_hold(match->sk);
5213 }
5214 }
5215
/* Handle completion of a class of device update: emit Class Of Dev
 * Changed on success. The foreach calls pick up the socket of the
 * first pending command (Set Dev Class, Add UUID or Remove UUID) that
 * could have triggered the update.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		/* The class of device is always 3 bytes on the wire */
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5232
/* Handle completion of a local name update.
 *
 * When no Set Local Name command is pending, the update originated
 * inside the kernel: the new name is stored and, if it happened as
 * part of powering on, the Local Name Changed event is suppressed.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	/* NOTE(review): assumes name points to a buffer of at least
	 * HCI_MAX_NAME_LENGTH bytes - confirm against callers.
	 */
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* The issuing socket, if any, is excluded from the event */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5259
/* Handle completion of a Read Local OOB Data request.
 *
 * Replies with the extended (192-bit + 256-bit) response when Secure
 * Connections is enabled and 256-bit data is available, otherwise
 * with the legacy 192-bit response.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5306
/* Emit a Device Found event for a discovery result.
 *
 * The event is dropped when discovery is not active, or when the EIR
 * data plus a possibly appended class of device field would overflow
 * the local buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	/* Flag devices whose name still needs user-space confirmation
	 * and devices that don't support Simple Pairing.
	 */
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR data already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5344
5345 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5346 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5347 {
5348 struct mgmt_ev_device_found *ev;
5349 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5350 u16 eir_len;
5351
5352 ev = (struct mgmt_ev_device_found *) buf;
5353
5354 memset(buf, 0, sizeof(buf));
5355
5356 bacpy(&ev->addr.bdaddr, bdaddr);
5357 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5358 ev->rssi = rssi;
5359
5360 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5361 name_len);
5362
5363 ev->eir_len = cpu_to_le16(eir_len);
5364
5365 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5366 }
5367
/* Report a discovery state change.
 *
 * Completes the matching pending Start/Stop Discovery command (if
 * any) with the current discovery type, then emits a Discovering
 * event to all sockets.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5394
5395 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5396 {
5397 struct pending_cmd *cmd;
5398 struct mgmt_ev_device_blocked ev;
5399
5400 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5401
5402 bacpy(&ev.addr.bdaddr, bdaddr);
5403 ev.addr.type = type;
5404
5405 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5406 cmd ? cmd->sk : NULL);
5407 }
5408
5409 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5410 {
5411 struct pending_cmd *cmd;
5412 struct mgmt_ev_device_unblocked ev;
5413
5414 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5415
5416 bacpy(&ev.addr.bdaddr, bdaddr);
5417 ev.addr.type = type;
5418
5419 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5420 cmd ? cmd->sk : NULL);
5421 }
5422
5423 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5424 {
5425 BT_DBG("%s status %u", hdev->name, status);
5426
5427 /* Clear the advertising mgmt setting if we failed to re-enable it */
5428 if (status) {
5429 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5430 new_settings(hdev, NULL);
5431 }
5432 }
5433
/* Re-enable LE advertising when the HCI_ADVERTISING setting is on and
 * no LE connections remain.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising stays off while any LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}