]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Don't clear HCI_DISCOVERABLE when powering off
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Opcodes supported by this management interface, reported (as a
 * little-endian list) in the Read Management Supported Commands reply;
 * see read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Events this interface may emit, reported alongside mgmt_commands[]
 * in the Read Management Supported Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
112
/* Service-cache lifetime: two seconds, expressed in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A device counts as powered only if it is up AND the power-on was not
 * merely the automatic (auto-off pending) kind.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
			       !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* A management command that has been received but not yet answered */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index the command targets */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* originating control socket (reference held) */
	void *user_data;	/* opcode-specific private data */
};
126
/* HCI to MGMT error code conversion table, indexed by the raw HCI status
 * byte; see mgmt_status() for the bounds-checked lookup.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
191
192 static u8 mgmt_status(u8 hci_status)
193 {
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
196
197 return MGMT_STATUS_FAILED;
198 }
199
200 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
201 {
202 struct sk_buff *skb;
203 struct mgmt_hdr *hdr;
204 struct mgmt_ev_cmd_status *ev;
205 int err;
206
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
208
209 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 if (!skb)
211 return -ENOMEM;
212
213 hdr = (void *) skb_put(skb, sizeof(*hdr));
214
215 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
216 hdr->index = cpu_to_le16(index);
217 hdr->len = cpu_to_le16(sizeof(*ev));
218
219 ev = (void *) skb_put(skb, sizeof(*ev));
220 ev->status = status;
221 ev->opcode = cpu_to_le16(cmd);
222
223 err = sock_queue_rcv_skb(sk, skb);
224 if (err < 0)
225 kfree_skb(skb);
226
227 return err;
228 }
229
230 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
231 void *rp, size_t rp_len)
232 {
233 struct sk_buff *skb;
234 struct mgmt_hdr *hdr;
235 struct mgmt_ev_cmd_complete *ev;
236 int err;
237
238 BT_DBG("sock %p", sk);
239
240 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 if (!skb)
242 return -ENOMEM;
243
244 hdr = (void *) skb_put(skb, sizeof(*hdr));
245
246 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
247 hdr->index = cpu_to_le16(index);
248 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
249
250 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
251 ev->opcode = cpu_to_le16(cmd);
252 ev->status = status;
253
254 if (rp)
255 memcpy(ev->data, rp, rp_len);
256
257 err = sock_queue_rcv_skb(sk, skb);
258 if (err < 0)
259 kfree_skb(skb);
260
261 return err;
262 }
263
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 u16 data_len)
266 {
267 struct mgmt_rp_read_version rp;
268
269 BT_DBG("sock %p", sk);
270
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
273
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 sizeof(rp));
276 }
277
278 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
279 u16 data_len)
280 {
281 struct mgmt_rp_read_commands *rp;
282 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
283 const u16 num_events = ARRAY_SIZE(mgmt_events);
284 __le16 *opcode;
285 size_t rp_size;
286 int i, err;
287
288 BT_DBG("sock %p", sk);
289
290 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
291
292 rp = kmalloc(rp_size, GFP_KERNEL);
293 if (!rp)
294 return -ENOMEM;
295
296 rp->num_commands = __constant_cpu_to_le16(num_commands);
297 rp->num_events = __constant_cpu_to_le16(num_events);
298
299 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
300 put_unaligned_le16(mgmt_commands[i], opcode);
301
302 for (i = 0; i < num_events; i++, opcode++)
303 put_unaligned_le16(mgmt_events[i], opcode);
304
305 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
306 rp_size);
307 kfree(rp);
308
309 return err;
310 }
311
312 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
313 u16 data_len)
314 {
315 struct mgmt_rp_read_index_list *rp;
316 struct hci_dev *d;
317 size_t rp_len;
318 u16 count;
319 int err;
320
321 BT_DBG("sock %p", sk);
322
323 read_lock(&hci_dev_list_lock);
324
325 count = 0;
326 list_for_each_entry(d, &hci_dev_list, list) {
327 if (d->dev_type == HCI_BREDR)
328 count++;
329 }
330
331 rp_len = sizeof(*rp) + (2 * count);
332 rp = kmalloc(rp_len, GFP_ATOMIC);
333 if (!rp) {
334 read_unlock(&hci_dev_list_lock);
335 return -ENOMEM;
336 }
337
338 count = 0;
339 list_for_each_entry(d, &hci_dev_list, list) {
340 if (test_bit(HCI_SETUP, &d->dev_flags))
341 continue;
342
343 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
344 continue;
345
346 if (d->dev_type == HCI_BREDR) {
347 rp->index[count++] = cpu_to_le16(d->id);
348 BT_DBG("Added hci%u", d->id);
349 }
350 }
351
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
354
355 read_unlock(&hci_dev_list_lock);
356
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
358 rp_len);
359
360 kfree(rp);
361
362 return err;
363 }
364
365 static u32 get_supported_settings(struct hci_dev *hdev)
366 {
367 u32 settings = 0;
368
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
371 settings |= MGMT_SETTING_DEBUG_KEYS;
372
373 if (lmp_bredr_capable(hdev)) {
374 settings |= MGMT_SETTING_CONNECTABLE;
375 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
376 settings |= MGMT_SETTING_FAST_CONNECTABLE;
377 settings |= MGMT_SETTING_DISCOVERABLE;
378 settings |= MGMT_SETTING_BREDR;
379 settings |= MGMT_SETTING_LINK_SECURITY;
380
381 if (lmp_ssp_capable(hdev)) {
382 settings |= MGMT_SETTING_SSP;
383 settings |= MGMT_SETTING_HS;
384 }
385
386 if (lmp_sc_capable(hdev) ||
387 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
388 settings |= MGMT_SETTING_SECURE_CONN;
389 }
390
391 if (lmp_le_capable(hdev)) {
392 settings |= MGMT_SETTING_LE;
393 settings |= MGMT_SETTING_ADVERTISING;
394 settings |= MGMT_SETTING_PRIVACY;
395 }
396
397 return settings;
398 }
399
400 static u32 get_current_settings(struct hci_dev *hdev)
401 {
402 u32 settings = 0;
403
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
406
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
409
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
412
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
415
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
418
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
421
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
424
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
427
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
430
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
433
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
436
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
439
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
442
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
445
446 return settings;
447 }
448
/* Device ID (PnP Information) service class; it is advertised via the
 * dedicated EIR_DEVICE_ID field, not the UUID list.
 */
#define PNP_INFO_SVCLASS_ID 0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs.
 *
 * @data: current write position in the EIR buffer
 * @len: bytes remaining in the buffer
 *
 * Returns the updated write position.  When not all UUIDs fit, the
 * field type is downgraded from EIR_UUID16_ALL to EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits in bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
492
/* Append an EIR field listing the registered 32-bit service UUIDs.
 * Same contract as create_uuid16_list(); returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias sits in bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
525
/* Append an EIR field listing the registered 128-bit service UUIDs.
 * Same contract as create_uuid16_list(); returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
558
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
560 {
561 struct pending_cmd *cmd;
562
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
565 return cmd;
566 }
567
568 return NULL;
569 }
570
571 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
572 {
573 u8 ad_len = 0;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577 if (name_len > 0) {
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
579
580 if (name_len > max_len) {
581 name_len = max_len;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 ptr[0] = name_len + 1;
587
588 memcpy(ptr + 2, hdev->dev_name, name_len);
589
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
592 }
593
594 return ad_len;
595 }
596
/* Queue an HCI command to refresh the LE scan response data, but only
 * if LE is enabled and the data actually changed.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
621
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
623 {
624 struct pending_cmd *cmd;
625
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
628 */
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
630 if (cmd) {
631 struct mgmt_mode *cp = cmd->param;
632 if (cp->val == 0x01)
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
636 } else {
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
641 }
642
643 return 0;
644 }
645
/* Fill @ptr with LE advertising data (flags and TX power) and return
 * the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* The flags field is omitted entirely when no flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
677
/* Queue an HCI command to refresh the LE advertising data, but only
 * if LE is enabled and the data actually changed.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
702
/* Build the extended inquiry response payload into @data: local name,
 * TX power, Device ID and the three UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version (LE) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space is left in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
750
/* Queue a Write Extended Inquiry Response command if EIR is applicable
 * (powered, capable, SSP on, no pending service-cache flush) and the
 * data actually changed.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, EIR updates are deferred
	 * until service_cache_off() runs.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
779
780 static u8 get_service_classes(struct hci_dev *hdev)
781 {
782 struct bt_uuid *uuid;
783 u8 val = 0;
784
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
787
788 return val;
789 }
790
/* Queue a Write Class of Device command reflecting the current minor,
 * major and service classes, if BR/EDR is usable and the value changed.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Deferred while the service cache is active; see service_cache_off() */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited-discoverable mode sets bit 13 of the Class of Device */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round-trip when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
819
820 static u8 get_adv_type(struct hci_dev *hdev)
821 {
822 struct pending_cmd *cmd;
823 bool connectable;
824
825 /* If there's a pending mgmt command the flag will not yet have
826 * it's final value, so check for this first.
827 */
828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829 if (cmd) {
830 struct mgmt_mode *cp = cmd->param;
831 connectable = !!cp->val;
832 } else {
833 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
834 }
835
836 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
837 }
838
/* Queue the HCI commands that configure and enable LE advertising.
 * If updating the random address fails, nothing is queued.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool require_privacy;

	/* Privacy is requested only when the device is not connectable */
	require_privacy = !test_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	if (hci_update_random_address(req, require_privacy, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 * 0.625ms = 1.28s advertising interval */
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
862
863 static void disable_advertising(struct hci_request *req)
864 {
865 u8 enable = 0x00;
866
867 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
868 }
869
/* Delayed work: flush the service cache by pushing the (possibly
 * deferred) EIR and Class of Device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was actually set (clears it too) */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Run the request outside the device lock */
	hci_req_run(&req, NULL);
}
890
/* Delayed work: the resolvable private address timed out.  Mark it
 * expired and, if we are advertising with no LE connections, restart
 * advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing further to do unless we are advertising, and the RPA
	 * cannot be changed while an LE connection exists.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
916
/* One-time setup when a controller is first touched through the mgmt
 * interface: initialize its delayed work items and reset pairability.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Run at most once per device: HCI_MGMT stays set afterwards */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
932
/* Handle MGMT_OP_READ_INFO: report address, version, manufacturer,
 * settings, class of device and names for @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Take the device lock so the reply is a consistent snapshot */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
962
963 static void mgmt_pending_free(struct pending_cmd *cmd)
964 {
965 sock_put(cmd->sk);
966 kfree(cmd->param);
967 kfree(cmd);
968 }
969
970 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
971 struct hci_dev *hdev, void *data,
972 u16 len)
973 {
974 struct pending_cmd *cmd;
975
976 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
977 if (!cmd)
978 return NULL;
979
980 cmd->opcode = opcode;
981 cmd->index = hdev->id;
982
983 cmd->param = kmalloc(len, GFP_KERNEL);
984 if (!cmd->param) {
985 kfree(cmd);
986 return NULL;
987 }
988
989 if (data)
990 memcpy(cmd->param, data, len);
991
992 cmd->sk = sk;
993 sock_hold(sk);
994
995 list_add(&cmd->list, &hdev->mgmt_pending);
996
997 return cmd;
998 }
999
1000 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1001 void (*cb)(struct pending_cmd *cmd,
1002 void *data),
1003 void *data)
1004 {
1005 struct pending_cmd *cmd, *tmp;
1006
1007 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1008 if (opcode > 0 && cmd->opcode != opcode)
1009 continue;
1010
1011 cb(cmd, data);
1012 }
1013 }
1014
/* Unlink @cmd from hdev->mgmt_pending and release its resources */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1020
1021 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1022 {
1023 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1024
1025 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1026 sizeof(settings));
1027 }
1028
/* Handle MGMT_OP_SET_POWERED: power the controller on or off.
 *
 * The actual power change runs asynchronously on hdev->req_workqueue;
 * the pending command is completed once the transition finishes.
 * Returns 0 (reply sent later or already) or a negative errno.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the device was auto-powered (pending auto-off), cancel the
	 * timer; powering "on" then just makes the current state sticky.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1083
1084 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1085 struct sock *skip_sk)
1086 {
1087 struct sk_buff *skb;
1088 struct mgmt_hdr *hdr;
1089
1090 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1091 if (!skb)
1092 return -ENOMEM;
1093
1094 hdr = (void *) skb_put(skb, sizeof(*hdr));
1095 hdr->opcode = cpu_to_le16(event);
1096 if (hdev)
1097 hdr->index = cpu_to_le16(hdev->id);
1098 else
1099 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1100 hdr->len = cpu_to_le16(data_len);
1101
1102 if (data)
1103 memcpy(skb_put(skb, data_len), data, data_len);
1104
1105 /* Time stamp */
1106 __net_timestamp(skb);
1107
1108 hci_send_to_control(skb, skip_sk);
1109 kfree_skb(skb);
1110
1111 return 0;
1112 }
1113
1114 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1115 {
1116 __le32 ev;
1117
1118 ev = cpu_to_le32(get_current_settings(hdev));
1119
1120 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1121 }
1122
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp().
 */
struct cmd_lookup {
	struct sock *sk;	/* set to the first responded socket (held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1128
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings and free it.  Remembers (and holds) the first socket it
 * responds to in the cmd_lookup context.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);	/* caller must sock_put() later */
	}

	mgmt_pending_free(cmd);
}
1144
1145 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1146 {
1147 u8 *status = data;
1148
1149 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1150 mgmt_pending_remove(cmd);
1151 }
1152
1153 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1154 {
1155 if (!lmp_bredr_capable(hdev))
1156 return MGMT_STATUS_NOT_SUPPORTED;
1157 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1158 return MGMT_STATUS_REJECTED;
1159 else
1160 return MGMT_STATUS_SUCCESS;
1161 }
1162
1163 static u8 mgmt_le_support(struct hci_dev *hdev)
1164 {
1165 if (!lmp_le_capable(hdev))
1166 return MGMT_STATUS_NOT_SUPPORTED;
1167 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1168 return MGMT_STATUS_REJECTED;
1169 else
1170 return MGMT_STATUS_SUCCESS;
1171 }
1172
/* HCI request callback: finish a pending MGMT_OP_SET_DISCOVERABLE.
 * Updates HCI_DISCOVERABLE, arms the discoverable timeout, notifies
 * user space and refreshes the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited-discoverable flag was set optimistically;
		 * roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* A non-zero timeout arms the delayed discov_off work */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	/* Only broadcast NEW_SETTINGS if the flag actually flipped */
	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1229
/* Handler for the Set Discoverable mgmt command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the auto-off timeout in seconds.
 * Validates parameter combinations, handles the powered-off case by
 * just flipping the setting flag, short-circuits when only the timeout
 * changes, and otherwise builds an HCI request (IAC LAP + scan enable
 * for BR/EDR, advertising data update for LE) completed by
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires at least one of LE or BR/EDR to be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable is only allowed while connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1394
/* Queue HCI commands that switch page scan between fast (interlaced,
 * 160 msec interval) and standard (1.28 sec interval) parameters.
 * Commands are only added when the requested values differ from the
 * controller's current ones, so the request may end up empty.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require Bluetooth 1.2+. */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = __constant_cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = __constant_cpu_to_le16(0x0800);
	}

	acp.window = __constant_cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1429
/* HCI request completion callback for set_connectable(): respond to the
 * pending mgmt command, update HCI_CONNECTABLE and broadcast New
 * Settings if the flag changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	/* Only broadcast New Settings if the flag actually changed. */
	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1467
1468 static int set_connectable_update_settings(struct hci_dev *hdev,
1469 struct sock *sk, u8 val)
1470 {
1471 bool changed = false;
1472 int err;
1473
1474 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1475 changed = true;
1476
1477 if (val) {
1478 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1479 } else {
1480 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1481 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1482 }
1483
1484 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1485 if (err < 0)
1486 return err;
1487
1488 if (changed)
1489 return new_settings(hdev, sk);
1490
1491 return 0;
1492 }
1493
/* Handler for the Set Connectable mgmt command.
 *
 * When powered off only the setting flags are updated; otherwise an HCI
 * request is built: scan enable (and possibly fast-connectable reset)
 * for BR/EDR, advertising data update for LE-only controllers, plus an
 * advertising restart when advertising is active with no LE links.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Requires at least one of LE or BR/EDR to be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update the stored settings. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against in-flight discoverable/connectable changes. */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Only touch scan enable when it would actually change. */
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable while inquiry scanning
			 * with a timeout: stop the pending auto-off.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA means the request had no commands; fall back
		 * to a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1588
/* Handler for the Set Pairable mgmt command. Purely a host-side flag:
 * no HCI traffic is needed, so the flag is toggled, the caller answered
 * and New Settings broadcast if anything changed.
 */
static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1620
/* Handler for the Set Link Security mgmt command. Requires BR/EDR.
 * When powered off only the setting flag is toggled; otherwise
 * HCI_Write_Auth_Enable is sent and the response handled elsewhere via
 * the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: remember the desired setting only. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1690
/* Handler for the Set Secure Simple Pairing mgmt command. Requires a
 * BR/EDR capable, SSP capable controller. When powered off only the
 * flags are updated (disabling SSP also drops High Speed, which depends
 * on it); otherwise HCI_Write_Simple_Pairing_Mode is sent.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS requires SSP, so clearing SSP clears HS too;
			 * 'changed' must become true if either flag was set.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer without HCI traffic. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1768
/* Handler for the Set High Speed mgmt command. HS is a host-side flag
 * that depends on SSP being enabled; disabling it while powered on is
 * rejected, so no HCI traffic is ever generated here.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1819
/* HCI request completion callback for set_le(): answer all pending
 * Set LE commands (with an error status on failure), broadcast New
 * Settings, and refresh advertising/scan response data when LE ended
 * up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk (set by settings_rsp) is skipped so the requester
	 * does not get the event twice.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1857
/* Handler for the Set Low Energy mgmt command. When powered off, or
 * when the controller's host LE support already matches, only the flags
 * are updated (disabling LE also drops the advertising flag). Otherwise
 * HCI_Write_LE_Host_Supported is queued, preceded by disabling
 * advertising when LE is being switched off.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on without LE. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight LE/advertising changes. */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop advertising before turning LE host support off. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1946
1947 /* This is a helper function to test for pending mgmt commands that can
1948 * cause CoD or EIR HCI commands. We can only allow one such pending
1949 * mgmt command at a time since otherwise we cannot easily track what
1950 * the current values are, will be, and based on that calculate if a new
1951 * HCI command needs to be sent and if yes with what value.
1952 */
1953 static bool pending_eir_or_class(struct hci_dev *hdev)
1954 {
1955 struct pending_cmd *cmd;
1956
1957 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1958 switch (cmd->opcode) {
1959 case MGMT_OP_ADD_UUID:
1960 case MGMT_OP_REMOVE_UUID:
1961 case MGMT_OP_SET_DEV_CLASS:
1962 case MGMT_OP_SET_POWERED:
1963 return true;
1964 }
1965 }
1966
1967 return false;
1968 }
1969
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16- and 32-bit UUIDs are shortened forms
 * whose value occupies bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1974
1975 static u8 get_uuid_size(const u8 *uuid)
1976 {
1977 u32 val;
1978
1979 if (memcmp(uuid, bluetooth_base_uuid, 12))
1980 return 128;
1981
1982 val = get_unaligned_le32(&uuid[12]);
1983 if (val > 0xffff)
1984 return 32;
1985
1986 return 16;
1987 }
1988
/* Shared completion helper for the class/EIR related commands (Add UUID,
 * Remove UUID, Set Device Class): answer the pending command of the
 * given opcode with the current class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2007
/* HCI request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2014
/* Handler for the Add UUID mgmt command: append the UUID to hdev->uuids
 * (owned by the device and freed via hci_uuids_clear()) and refresh the
 * class of device and EIR data. -ENODATA from hci_req_run means the
 * request was empty, in which case the command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Empty request: nothing to wait for, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2072
2073 static bool enable_service_cache(struct hci_dev *hdev)
2074 {
2075 if (!hdev_is_powered(hdev))
2076 return false;
2077
2078 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2079 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2080 CACHE_TIMEOUT);
2081 return true;
2082 }
2083
2084 return false;
2085 }
2086
/* HCI request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2093
/* Handler for the Remove UUID mgmt command. An all-zero UUID clears the
 * whole list (possibly just arming the service cache instead of touching
 * the controller); otherwise every matching entry is removed. Refreshes
 * class of device and EIR data afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed, the actual CoD/EIR
		 * update is deferred to the cache timeout.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to wait for, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2171
/* HCI request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2178
/* Handler for the Set Device Class mgmt command. Stores the new
 * major/minor class and, when powered, pushes the updated class of
 * device (and EIR, if the service cache was active) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are
	 * reserved in the Class of Device format.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* The lock is dropped around the synchronous cancel -
		 * NOTE(review): presumably because the service_cache work
		 * item itself takes the hdev lock; confirm before changing.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to wait for, reply right away. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2249
/* Handler for the Load Link Keys mgmt command: replace the whole stored
 * link-key list with the supplied one and update the debug-keys flag.
 * The key array length is validated against the message length before
 * any key is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);

	/* key_count must exactly account for the remaining payload. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       len, expected_len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before any state is modified. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2315
/* Emit a Device Unpaired mgmt event for the given address, skipping the
 * socket that triggered the unpair (it gets a command reply instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2327
/* Handler for the Unpair Device mgmt command: delete the stored keys
 * (link key for BR/EDR; IRK and LTK for LE) and optionally disconnect
 * the device. When a disconnect is issued, command completion is
 * deferred until the disconnect finishes via the pending command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No key found means the device was never paired. */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: complete immediately and notify others. */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2416
/* Disconnect (MGMT) command handler.
 *
 * Requests termination of an existing BR/EDR (ACL) or LE connection to
 * the given address. The command completes asynchronously: a pending
 * command is queued and resolved when the HCI Disconnect finishes.
 * Only one Disconnect command may be in flight per controller.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always carries the target address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only a single outstanding Disconnect is allowed */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2481
2482 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2483 {
2484 switch (link_type) {
2485 case LE_LINK:
2486 switch (addr_type) {
2487 case ADDR_LE_DEV_PUBLIC:
2488 return BDADDR_LE_PUBLIC;
2489
2490 default:
2491 /* Fallback to LE Random address type */
2492 return BDADDR_LE_RANDOM;
2493 }
2494
2495 default:
2496 /* Fallback to BR/EDR type */
2497 return BDADDR_BREDR;
2498 }
2499 }
2500
2501 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2502 u16 data_len)
2503 {
2504 struct mgmt_rp_get_connections *rp;
2505 struct hci_conn *c;
2506 size_t rp_len;
2507 int err;
2508 u16 i;
2509
2510 BT_DBG("");
2511
2512 hci_dev_lock(hdev);
2513
2514 if (!hdev_is_powered(hdev)) {
2515 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2516 MGMT_STATUS_NOT_POWERED);
2517 goto unlock;
2518 }
2519
2520 i = 0;
2521 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2522 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2523 i++;
2524 }
2525
2526 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2527 rp = kmalloc(rp_len, GFP_KERNEL);
2528 if (!rp) {
2529 err = -ENOMEM;
2530 goto unlock;
2531 }
2532
2533 i = 0;
2534 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2535 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2536 continue;
2537 bacpy(&rp->addr[i].bdaddr, &c->dst);
2538 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2539 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2540 continue;
2541 i++;
2542 }
2543
2544 rp->conn_count = cpu_to_le16(i);
2545
2546 /* Recalculate length in case of filtered SCO connections, etc */
2547 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2548
2549 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2550 rp_len);
2551
2552 kfree(rp);
2553
2554 unlock:
2555 hci_dev_unlock(hdev);
2556 return err;
2557 }
2558
2559 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2560 struct mgmt_cp_pin_code_neg_reply *cp)
2561 {
2562 struct pending_cmd *cmd;
2563 int err;
2564
2565 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2566 sizeof(*cp));
2567 if (!cmd)
2568 return -ENOMEM;
2569
2570 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2571 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2572 if (err < 0)
2573 mgmt_pending_remove(cmd);
2574
2575 return err;
2576 }
2577
/* PIN Code Reply (MGMT) command handler.
 *
 * Forwards a user-supplied PIN code for an ongoing BR/EDR pairing. If
 * the connection requires high security, a 16-byte PIN is mandatory;
 * otherwise the request is converted into a negative reply and the
 * caller gets MGMT_STATUS_INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only exists on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte (fixed) PIN; reject the
	 * pairing on the HCI level and report invalid parameters.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2637
2638 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2639 u16 len)
2640 {
2641 struct mgmt_cp_set_io_capability *cp = data;
2642
2643 BT_DBG("");
2644
2645 hci_dev_lock(hdev);
2646
2647 hdev->io_capability = cp->io_capability;
2648
2649 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2650 hdev->io_capability);
2651
2652 hci_dev_unlock(hdev);
2653
2654 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2655 0);
2656 }
2657
2658 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2659 {
2660 struct hci_dev *hdev = conn->hdev;
2661 struct pending_cmd *cmd;
2662
2663 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2664 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2665 continue;
2666
2667 if (cmd->user_data != conn)
2668 continue;
2669
2670 return cmd;
2671 }
2672
2673 return NULL;
2674 }
2675
/* Finish a pending Pair Device command with the given mgmt status.
 *
 * Sends the Command Complete reply, detaches all pairing callbacks from
 * the connection (so no further events resolve this command), drops the
 * connection reference held by the command and removes the pending
 * entry. cmd->user_data must be the paired hci_conn.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Release the reference taken when the pairing was initiated */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2696
2697 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2698 {
2699 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2700 struct pending_cmd *cmd;
2701
2702 cmd = find_pairing(conn);
2703 if (cmd)
2704 pairing_complete(cmd, status);
2705 }
2706
2707 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2708 {
2709 struct pending_cmd *cmd;
2710
2711 BT_DBG("status %u", status);
2712
2713 cmd = find_pairing(conn);
2714 if (!cmd)
2715 BT_DBG("Unable to find a pending command");
2716 else
2717 pairing_complete(cmd, mgmt_status(status));
2718 }
2719
/* Connection/security/disconnect callback used for LE pairing.
 *
 * For LE a successful callback does not mean pairing finished (see the
 * comment in pair_device()); successful completion is reported via
 * mgmt_smp_complete() instead. Only failures resolve the pending
 * Pair Device command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Success is signalled through mgmt_smp_complete() */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}
2735
2736 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2737 u16 len)
2738 {
2739 struct mgmt_cp_pair_device *cp = data;
2740 struct mgmt_rp_pair_device rp;
2741 struct pending_cmd *cmd;
2742 u8 sec_level, auth_type;
2743 struct hci_conn *conn;
2744 int err;
2745
2746 BT_DBG("");
2747
2748 memset(&rp, 0, sizeof(rp));
2749 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2750 rp.addr.type = cp->addr.type;
2751
2752 if (!bdaddr_type_is_valid(cp->addr.type))
2753 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2754 MGMT_STATUS_INVALID_PARAMS,
2755 &rp, sizeof(rp));
2756
2757 hci_dev_lock(hdev);
2758
2759 if (!hdev_is_powered(hdev)) {
2760 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2761 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2762 goto unlock;
2763 }
2764
2765 sec_level = BT_SECURITY_MEDIUM;
2766 if (cp->io_cap == 0x03)
2767 auth_type = HCI_AT_DEDICATED_BONDING;
2768 else
2769 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2770
2771 if (cp->addr.type == BDADDR_BREDR)
2772 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2773 cp->addr.type, sec_level, auth_type);
2774 else
2775 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2776 cp->addr.type, sec_level, auth_type);
2777
2778 if (IS_ERR(conn)) {
2779 int status;
2780
2781 if (PTR_ERR(conn) == -EBUSY)
2782 status = MGMT_STATUS_BUSY;
2783 else
2784 status = MGMT_STATUS_CONNECT_FAILED;
2785
2786 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2787 status, &rp,
2788 sizeof(rp));
2789 goto unlock;
2790 }
2791
2792 if (conn->connect_cfm_cb) {
2793 hci_conn_drop(conn);
2794 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2795 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2796 goto unlock;
2797 }
2798
2799 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2800 if (!cmd) {
2801 err = -ENOMEM;
2802 hci_conn_drop(conn);
2803 goto unlock;
2804 }
2805
2806 /* For LE, just connecting isn't a proof that the pairing finished */
2807 if (cp->addr.type == BDADDR_BREDR) {
2808 conn->connect_cfm_cb = pairing_complete_cb;
2809 conn->security_cfm_cb = pairing_complete_cb;
2810 conn->disconn_cfm_cb = pairing_complete_cb;
2811 } else {
2812 conn->connect_cfm_cb = le_pairing_complete_cb;
2813 conn->security_cfm_cb = le_pairing_complete_cb;
2814 conn->disconn_cfm_cb = le_pairing_complete_cb;
2815 }
2816
2817 conn->io_capability = cp->io_cap;
2818 cmd->user_data = conn;
2819
2820 if (conn->state == BT_CONNECTED &&
2821 hci_conn_security(conn, sec_level, auth_type))
2822 pairing_complete(cmd, 0);
2823
2824 err = 0;
2825
2826 unlock:
2827 hci_dev_unlock(hdev);
2828 return err;
2829 }
2830
/* Cancel Pair Device (MGMT) command handler.
 *
 * Aborts the currently pending Pair Device command for the given
 * address, completing it with MGMT_STATUS_CANCELLED, and acknowledges
 * the cancel request itself with a Command Complete.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Without a pending Pair Device there is nothing to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2872
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negative variants).
 *
 * For LE addresses the response is routed to SMP and completed
 * synchronously; for BR/EDR a pending command is queued and the
 * corresponding HCI command (@hci_op) is sent. @passkey is only used by
 * HCI_OP_USER_PASSKEY_REPLY and is ignored otherwise.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		/* SMP path completes synchronously: map the result to a
		 * mgmt status right away.
		 */
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2942
/* PIN Code Negative Reply (MGMT): reject an ongoing PIN pairing.
 * Thin wrapper around user_pairing_resp(); no passkey is involved.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
2954
/* User Confirmation Reply (MGMT): accept a numeric-comparison pairing.
 * Validates the fixed command size, then delegates to
 * user_pairing_resp(); no passkey is involved.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
2970
/* User Confirmation Negative Reply (MGMT): reject a numeric-comparison
 * pairing. Thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
2982
/* User Passkey Reply (MGMT): supply the passkey entered by the user.
 * Thin wrapper around user_pairing_resp() forwarding cp->passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
2994
/* User Passkey Negative Reply (MGMT): reject a passkey-entry pairing.
 * Thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3006
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3016
/* Request-completion callback for Set Local Name: resolves the pending
 * mgmt command with the HCI status, echoing the requested name back on
 * success.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending entry may already be gone (e.g. power-off cleanup) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3044
/* Set Local Name (MGMT) command handler.
 *
 * Updates the device's long and short names. When powered off the new
 * names are only stored and the command completes immediately; when
 * powered on, an HCI request updates the controller name, EIR data and
 * (for LE) scan response data, completing via set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and report the change without
	 * touching the controller.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3113
/* Read Local OOB Data (MGMT) command handler.
 *
 * Asks the controller for local out-of-band pairing data. Requires SSP
 * support; when Secure Connections is enabled the extended variant is
 * requested. Completes asynchronously via the pending command.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections, request the extended (P-192 + P-256)
	 * OOB data variant.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3161
/* Add Remote OOB Data (MGMT) command handler.
 *
 * Stores out-of-band pairing data received for a remote device. The
 * command length distinguishes the legacy variant (P-192 hash and
 * randomizer) from the extended one (P-192 + P-256); any other length
 * is rejected as invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3209
3210 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3211 void *data, u16 len)
3212 {
3213 struct mgmt_cp_remove_remote_oob_data *cp = data;
3214 u8 status;
3215 int err;
3216
3217 BT_DBG("%s", hdev->name);
3218
3219 hci_dev_lock(hdev);
3220
3221 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3222 if (err < 0)
3223 status = MGMT_STATUS_INVALID_PARAMS;
3224 else
3225 status = MGMT_STATUS_SUCCESS;
3226
3227 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3228 status, &cp->addr, sizeof(cp->addr));
3229
3230 hci_dev_unlock(hdev);
3231 return err;
3232 }
3233
3234 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3235 {
3236 struct pending_cmd *cmd;
3237 u8 type;
3238 int err;
3239
3240 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3241
3242 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3243 if (!cmd)
3244 return -ENOENT;
3245
3246 type = hdev->discovery.type;
3247
3248 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3249 &type, sizeof(type));
3250 mgmt_pending_remove(cmd);
3251
3252 return err;
3253 }
3254
3255 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3256 {
3257 BT_DBG("status %d", status);
3258
3259 if (status) {
3260 hci_dev_lock(hdev);
3261 mgmt_start_discovery_failed(hdev, status);
3262 hci_dev_unlock(hdev);
3263 return;
3264 }
3265
3266 hci_dev_lock(hdev);
3267 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3268 hci_dev_unlock(hdev);
3269
3270 switch (hdev->discovery.type) {
3271 case DISCOV_TYPE_LE:
3272 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3273 DISCOV_LE_TIMEOUT);
3274 break;
3275
3276 case DISCOV_TYPE_INTERLEAVED:
3277 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3278 DISCOV_INTERLEAVED_TIMEOUT);
3279 break;
3280
3281 case DISCOV_TYPE_BREDR:
3282 break;
3283
3284 default:
3285 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3286 }
3287 }
3288
/* Start Discovery (MGMT) command handler.
 *
 * Builds and runs an HCI request for the requested discovery type:
 * a BR/EDR inquiry, an LE active scan, or both (interleaved). The
 * command completes asynchronously via start_discovery_complete().
 * Rejected while powered off, during periodic inquiry, or when another
 * discovery is already in progress.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Drop stale results before starting a fresh inquiry */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is not allowed here */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3437
3438 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3439 {
3440 struct pending_cmd *cmd;
3441 int err;
3442
3443 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3444 if (!cmd)
3445 return -ENOENT;
3446
3447 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3448 &hdev->discovery.type, sizeof(hdev->discovery.type));
3449 mgmt_pending_remove(cmd);
3450
3451 return err;
3452 }
3453
3454 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3455 {
3456 BT_DBG("status %d", status);
3457
3458 hci_dev_lock(hdev);
3459
3460 if (status) {
3461 mgmt_stop_discovery_failed(hdev, status);
3462 goto unlock;
3463 }
3464
3465 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3466
3467 unlock:
3468 hci_dev_unlock(hdev);
3469 }
3470
/* Stop Discovery (MGMT) command handler.
 *
 * Cancels an active discovery of the matching type. Depending on the
 * current discovery phase this cancels the BR/EDR inquiry, disables the
 * LE scan, or cancels a pending remote name request. Completes
 * asynchronously via stop_discovery_complete() unless nothing needs to
 * be sent.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means a BR/EDR inquiry is running;
		 * otherwise the active part is the LE scan.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request pending: nothing to cancel, complete
		 * synchronously.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3562
/* Confirm Name (MGMT) command handler.
 *
 * Tells the kernel whether userspace already knows the name of a
 * discovered device. Known names are removed from the resolve list;
 * unknown ones are (re)queued for name resolution. Only valid while
 * discovery is active.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_FAILED);
		goto failed;
	}

	/* The address must refer to an entry awaiting name confirmation */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				 MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3602
3603 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3604 u16 len)
3605 {
3606 struct mgmt_cp_block_device *cp = data;
3607 u8 status;
3608 int err;
3609
3610 BT_DBG("%s", hdev->name);
3611
3612 if (!bdaddr_type_is_valid(cp->addr.type))
3613 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3614 MGMT_STATUS_INVALID_PARAMS,
3615 &cp->addr, sizeof(cp->addr));
3616
3617 hci_dev_lock(hdev);
3618
3619 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3620 if (err < 0)
3621 status = MGMT_STATUS_FAILED;
3622 else
3623 status = MGMT_STATUS_SUCCESS;
3624
3625 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3626 &cp->addr, sizeof(cp->addr));
3627
3628 hci_dev_unlock(hdev);
3629
3630 return err;
3631 }
3632
3633 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3634 u16 len)
3635 {
3636 struct mgmt_cp_unblock_device *cp = data;
3637 u8 status;
3638 int err;
3639
3640 BT_DBG("%s", hdev->name);
3641
3642 if (!bdaddr_type_is_valid(cp->addr.type))
3643 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3644 MGMT_STATUS_INVALID_PARAMS,
3645 &cp->addr, sizeof(cp->addr));
3646
3647 hci_dev_lock(hdev);
3648
3649 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3650 if (err < 0)
3651 status = MGMT_STATUS_INVALID_PARAMS;
3652 else
3653 status = MGMT_STATUS_SUCCESS;
3654
3655 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3656 &cp->addr, sizeof(cp->addr));
3657
3658 hci_dev_unlock(hdev);
3659
3660 return err;
3661 }
3662
3663 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3664 u16 len)
3665 {
3666 struct mgmt_cp_set_device_id *cp = data;
3667 struct hci_request req;
3668 int err;
3669 __u16 source;
3670
3671 BT_DBG("%s", hdev->name);
3672
3673 source = __le16_to_cpu(cp->source);
3674
3675 if (source > 0x0002)
3676 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3677 MGMT_STATUS_INVALID_PARAMS);
3678
3679 hci_dev_lock(hdev);
3680
3681 hdev->devid_source = source;
3682 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3683 hdev->devid_product = __le16_to_cpu(cp->product);
3684 hdev->devid_version = __le16_to_cpu(cp->version);
3685
3686 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3687
3688 hci_req_init(&req, hdev);
3689 update_eir(&req);
3690 hci_req_run(&req, NULL);
3691
3692 hci_dev_unlock(hdev);
3693
3694 return err;
3695 }
3696
3697 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3698 {
3699 struct cmd_lookup match = { NULL, hdev };
3700
3701 if (status) {
3702 u8 mgmt_err = mgmt_status(status);
3703
3704 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3705 cmd_status_rsp, &mgmt_err);
3706 return;
3707 }
3708
3709 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3710 &match);
3711
3712 new_settings(hdev, match.sk);
3713
3714 if (match.sk)
3715 sock_put(match.sk);
3716 }
3717
/* Handle the Set Advertising management command. Toggles LE
 * advertising, either by just flipping the HCI_ADVERTISING flag (when
 * no HCI traffic is needed) or by running an enable/disable HCI
 * request whose completion is handled in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support on the controller. */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising-related state change may be in flight. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3795
3796 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3797 void *data, u16 len)
3798 {
3799 struct mgmt_cp_set_static_address *cp = data;
3800 int err;
3801
3802 BT_DBG("%s", hdev->name);
3803
3804 if (!lmp_le_capable(hdev))
3805 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3806 MGMT_STATUS_NOT_SUPPORTED);
3807
3808 if (hdev_is_powered(hdev))
3809 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3810 MGMT_STATUS_REJECTED);
3811
3812 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3813 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3814 return cmd_status(sk, hdev->id,
3815 MGMT_OP_SET_STATIC_ADDRESS,
3816 MGMT_STATUS_INVALID_PARAMS);
3817
3818 /* Two most significant bits shall be set */
3819 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3820 return cmd_status(sk, hdev->id,
3821 MGMT_OP_SET_STATIC_ADDRESS,
3822 MGMT_STATUS_INVALID_PARAMS);
3823 }
3824
3825 hci_dev_lock(hdev);
3826
3827 bacpy(&hdev->static_addr, &cp->bdaddr);
3828
3829 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3830
3831 hci_dev_unlock(hdev);
3832
3833 return err;
3834 }
3835
3836 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3837 void *data, u16 len)
3838 {
3839 struct mgmt_cp_set_scan_params *cp = data;
3840 __u16 interval, window;
3841 int err;
3842
3843 BT_DBG("%s", hdev->name);
3844
3845 if (!lmp_le_capable(hdev))
3846 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3847 MGMT_STATUS_NOT_SUPPORTED);
3848
3849 interval = __le16_to_cpu(cp->interval);
3850
3851 if (interval < 0x0004 || interval > 0x4000)
3852 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3853 MGMT_STATUS_INVALID_PARAMS);
3854
3855 window = __le16_to_cpu(cp->window);
3856
3857 if (window < 0x0004 || window > 0x4000)
3858 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3859 MGMT_STATUS_INVALID_PARAMS);
3860
3861 if (window > interval)
3862 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3863 MGMT_STATUS_INVALID_PARAMS);
3864
3865 hci_dev_lock(hdev);
3866
3867 hdev->le_scan_interval = interval;
3868 hdev->le_scan_window = window;
3869
3870 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3871
3872 hci_dev_unlock(hdev);
3873
3874 return err;
3875 }
3876
/* Completion callback for the Set Fast Connectable HCI request:
 * update the HCI_FAST_CONNECTABLE flag to match the value the pending
 * command requested and respond to its originator.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored as the command param. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3909
/* Handle the Set Fast Connectable management command. Adjusts the
 * page scan parameters via an HCI request; the flag itself is only
 * flipped from fast_connectable_complete() once the controller has
 * accepted the new parameters.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Interlaced page scan needs BR/EDR and at least Bluetooth 1.2. */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable is meaningless while not connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3974
/* Append a Write Scan Enable command to the request that reflects the
 * current HCI_CONNECTABLE/HCI_DISCOVERABLE flags. No command is added
 * when both flags are clear.
 */
static void set_bredr_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan = 0;

	/* Ensure that fast connectable is disabled. This function will
	 * not do anything if the page scan parameters are already what
	 * they should be.
	 */
	write_fast_connectable(req, false);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		scan |= SCAN_PAGE;
	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (scan)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
3994
/* Completion callback for the Set BR/EDR HCI request. On failure the
 * optimistically-set HCI_BREDR_ENABLED flag (see set_bredr()) must be
 * rolled back before failing the pending command.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4026
/* Handle the Set BR/EDR management command on a dual-mode controller.
 * While powered off only the flags are toggled; while powered on,
 * enabling BR/EDR triggers an HCI request (scan enable plus updated
 * advertising flags) and disabling is rejected outright.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on a dual-mode controller. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; otherwise the controller would end up
	 * with no enabled transport at all.
	 */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings. */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4116
/* Handle the Set Secure Connections management command. Value 0x02
 * selects "SC only" mode. While powered off only flags are toggled;
 * while powered on a Write SC Support HCI command is issued and the
 * HCI_SC_ONLY flag is updated once the command was queued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows testing SC on controllers that do not
	 * advertise the feature.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either enabled state or SC-only mode: confirm
	 * the current settings without touching the controller.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4204
4205 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4206 void *data, u16 len)
4207 {
4208 struct mgmt_mode *cp = data;
4209 bool changed;
4210 int err;
4211
4212 BT_DBG("request for %s", hdev->name);
4213
4214 if (cp->val != 0x00 && cp->val != 0x01)
4215 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4216 MGMT_STATUS_INVALID_PARAMS);
4217
4218 hci_dev_lock(hdev);
4219
4220 if (cp->val)
4221 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4222 else
4223 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4224
4225 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4226 if (err < 0)
4227 goto unlock;
4228
4229 if (changed)
4230 err = new_settings(hdev, sk);
4231
4232 unlock:
4233 hci_dev_unlock(hdev);
4234 return err;
4235 }
4236
/* Handle the Set Privacy management command. Stores the local IRK and
 * toggles use of resolvable private addresses. Only allowed while the
 * controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next power-on. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4286
4287 static bool irk_is_valid(struct mgmt_irk_info *irk)
4288 {
4289 switch (irk->addr.type) {
4290 case BDADDR_LE_PUBLIC:
4291 return true;
4292
4293 case BDADDR_LE_RANDOM:
4294 /* Two most significant bits shall be set */
4295 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4296 return false;
4297 return true;
4298 }
4299
4300 return false;
4301 }
4302
4303 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4304 u16 len)
4305 {
4306 struct mgmt_cp_load_irks *cp = cp_data;
4307 u16 irk_count, expected_len;
4308 int i, err;
4309
4310 BT_DBG("request for %s", hdev->name);
4311
4312 if (!lmp_le_capable(hdev))
4313 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4314 MGMT_STATUS_NOT_SUPPORTED);
4315
4316 irk_count = __le16_to_cpu(cp->irk_count);
4317
4318 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4319 if (expected_len != len) {
4320 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4321 len, expected_len);
4322 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4323 MGMT_STATUS_INVALID_PARAMS);
4324 }
4325
4326 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4327
4328 for (i = 0; i < irk_count; i++) {
4329 struct mgmt_irk_info *key = &cp->irks[i];
4330
4331 if (!irk_is_valid(key))
4332 return cmd_status(sk, hdev->id,
4333 MGMT_OP_LOAD_IRKS,
4334 MGMT_STATUS_INVALID_PARAMS);
4335 }
4336
4337 hci_dev_lock(hdev);
4338
4339 hci_smp_irks_clear(hdev);
4340
4341 for (i = 0; i < irk_count; i++) {
4342 struct mgmt_irk_info *irk = &cp->irks[i];
4343 u8 addr_type;
4344
4345 if (irk->addr.type == BDADDR_LE_PUBLIC)
4346 addr_type = ADDR_LE_DEV_PUBLIC;
4347 else
4348 addr_type = ADDR_LE_DEV_RANDOM;
4349
4350 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4351 BDADDR_ANY);
4352 }
4353
4354 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4355
4356 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4357
4358 hci_dev_unlock(hdev);
4359
4360 return err;
4361 }
4362
4363 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4364 {
4365 if (key->master != 0x00 && key->master != 0x01)
4366 return false;
4367
4368 switch (key->addr.type) {
4369 case BDADDR_LE_PUBLIC:
4370 return true;
4371
4372 case BDADDR_LE_RANDOM:
4373 /* Two most significant bits shall be set */
4374 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4375 return false;
4376 return true;
4377 }
4378
4379 return false;
4380 }
4381
4382 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4383 void *cp_data, u16 len)
4384 {
4385 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4386 u16 key_count, expected_len;
4387 int i, err;
4388
4389 BT_DBG("request for %s", hdev->name);
4390
4391 if (!lmp_le_capable(hdev))
4392 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4393 MGMT_STATUS_NOT_SUPPORTED);
4394
4395 key_count = __le16_to_cpu(cp->key_count);
4396
4397 expected_len = sizeof(*cp) + key_count *
4398 sizeof(struct mgmt_ltk_info);
4399 if (expected_len != len) {
4400 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4401 len, expected_len);
4402 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4403 MGMT_STATUS_INVALID_PARAMS);
4404 }
4405
4406 BT_DBG("%s key_count %u", hdev->name, key_count);
4407
4408 for (i = 0; i < key_count; i++) {
4409 struct mgmt_ltk_info *key = &cp->keys[i];
4410
4411 if (!ltk_is_valid(key))
4412 return cmd_status(sk, hdev->id,
4413 MGMT_OP_LOAD_LONG_TERM_KEYS,
4414 MGMT_STATUS_INVALID_PARAMS);
4415 }
4416
4417 hci_dev_lock(hdev);
4418
4419 hci_smp_ltks_clear(hdev);
4420
4421 for (i = 0; i < key_count; i++) {
4422 struct mgmt_ltk_info *key = &cp->keys[i];
4423 u8 type, addr_type;
4424
4425 if (key->addr.type == BDADDR_LE_PUBLIC)
4426 addr_type = ADDR_LE_DEV_PUBLIC;
4427 else
4428 addr_type = ADDR_LE_DEV_RANDOM;
4429
4430 if (key->master)
4431 type = HCI_SMP_LTK;
4432 else
4433 type = HCI_SMP_LTK_SLAVE;
4434
4435 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4436 key->type, key->val, key->enc_size, key->ediv,
4437 key->rand);
4438 }
4439
4440 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4441 NULL, 0);
4442
4443 hci_dev_unlock(hdev);
4444
4445 return err;
4446 }
4447
/* Command dispatch table, indexed directly by mgmt opcode (see
 * mgmt_control()). var_len marks commands whose payload may exceed
 * data_len (which then acts as a minimum); otherwise the payload must
 * match data_len exactly. Entry order must match the opcode values
 * and must not be rearranged.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4504
4505
/* Entry point for management commands arriving on an HCI control
 * socket. Copies the message from user space, validates the header,
 * resolves the controller index and dispatches to the matching
 * mgmt_handlers[] entry. Returns the consumed length on success or a
 * negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must account for the full payload. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or claimed by a user
		 * channel are not visible over the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below Read Info are global and take no index, all
	 * other opcodes require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len commands accept any length >= data_len; fixed-size
	 * commands must match exactly.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4598
4599 void mgmt_index_added(struct hci_dev *hdev)
4600 {
4601 if (hdev->dev_type != HCI_BREDR)
4602 return;
4603
4604 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4605 }
4606
4607 void mgmt_index_removed(struct hci_dev *hdev)
4608 {
4609 u8 status = MGMT_STATUS_INVALID_INDEX;
4610
4611 if (hdev->dev_type != HCI_BREDR)
4612 return;
4613
4614 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4615
4616 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4617 }
4618
/* Completion callback for the power-up HCI request built in
 * powered_update_hci(): respond to pending Set Powered commands and
 * broadcast the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the last responding socket. */
	if (match.sk)
		sock_put(match.sk);
}
4636
/* Build and run the HCI request that brings the controller state in
 * line with the mgmt settings after power-on: SSP mode, LE host
 * support, advertising data, link security, scan mode, class, name
 * and EIR. Returns the hci_req_run() result (0 when commands were
 * queued, negative when the request was empty or failed).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if the host flag is set but the
	 * controller does not have it enabled yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4696
/* Notify the management interface about a power state change. On
 * power-on the HCI synchronization request is started first and the
 * mgmt responses are deferred to powered_complete() if it runs; on
 * power-off all pending commands are failed and a zero class of
 * device is broadcast if needed.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If the request was queued, powered_complete() takes
		 * care of the pending commands and settings event.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Opcode 0: fail every other pending command with Not Powered. */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4731
4732 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4733 {
4734 struct pending_cmd *cmd;
4735 u8 status;
4736
4737 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4738 if (!cmd)
4739 return;
4740
4741 if (err == -ERFKILL)
4742 status = MGMT_STATUS_RFKILLED;
4743 else
4744 status = MGMT_STATUS_FAILED;
4745
4746 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4747
4748 mgmt_pending_remove(cmd);
4749 }
4750
/* Timer callback for the discoverable timeout: drop the discoverable
 * flags, restore page-scan-only mode on BR/EDR, refresh class and
 * advertising data and broadcast the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan so connections still work. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4781
/* Keep the mgmt HCI_DISCOVERABLE flag in sync with the controller's
 * scan mode and notify user space when the setting actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * command's request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot remain active without general
		 * discoverable, so always clear it here.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4818
4819 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4820 {
4821 bool changed;
4822
4823 /* Nothing needed here if there's a pending command since that
4824 * commands request completion callback takes care of everything
4825 * necessary.
4826 */
4827 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4828 return;
4829
4830 if (connectable)
4831 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4832 else
4833 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4834
4835 if (changed)
4836 new_settings(hdev, NULL);
4837 }
4838
4839 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4840 {
4841 if (advertising)
4842 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4843 else
4844 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4845 }
4846
4847 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4848 {
4849 u8 mgmt_err = mgmt_status(status);
4850
4851 if (scan & SCAN_PAGE)
4852 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4853 cmd_status_rsp, &mgmt_err);
4854
4855 if (scan & SCAN_INQUIRY)
4856 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4857 cmd_status_rsp, &mgmt_err);
4858 }
4859
4860 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4861 bool persistent)
4862 {
4863 struct mgmt_ev_new_link_key ev;
4864
4865 memset(&ev, 0, sizeof(ev));
4866
4867 ev.store_hint = persistent;
4868 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4869 ev.key.addr.type = BDADDR_BREDR;
4870 ev.key.type = key->type;
4871 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4872 ev.key.pin_len = key->pin_len;
4873
4874 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4875 }
4876
/* Notify user space of a new Long Term Key so it can decide whether
 * to store it persistently.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* Only the master variant of the key sets this bit */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4914
/* Notify user space of a new Identity Resolving Key so it can decide
 * whether to store it persistently.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
4944
4945 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4946 u8 data_len)
4947 {
4948 eir[eir_len++] = sizeof(type) + data_len;
4949 eir[eir_len++] = type;
4950 memcpy(&eir[eir_len], data, data_len);
4951 eir_len += data_len;
4952
4953 return eir_len;
4954 }
4955
/* Emit a Device Connected event, packing the remote name and class of
 * device (when known) as EIR fields after the fixed event header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Fixed header plus name and CoD EIR fields fit in 512 bytes */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the used portion of the buffer */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4982
4983 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4984 {
4985 struct mgmt_cp_disconnect *cp = cmd->param;
4986 struct sock **sk = data;
4987 struct mgmt_rp_disconnect rp;
4988
4989 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4990 rp.addr.type = cp->addr.type;
4991
4992 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4993 sizeof(rp));
4994
4995 *sk = cmd->sk;
4996 sock_hold(*sk);
4997
4998 mgmt_pending_remove(cmd);
4999 }
5000
/* mgmt_pending_foreach() helper: complete a pending Unpair Device
 * command and broadcast Device Unpaired to the other mgmt sockets.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* cmd->sk is passed so the event skips the command's own socket;
	 * that socket gets the command response below instead.
	 */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5017
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* No event if user space never saw this link as connected */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are exposed through mgmt */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes the pending Disconnect command and, via sk, lets the
	 * event below skip the socket that issued it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5045
/* Handle a failed HCI Disconnect: complete any pending Unpair Device
 * commands and answer the matching pending Disconnect command with the
 * translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond if the pending command targets this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5077
5078 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5079 u8 addr_type, u8 status)
5080 {
5081 struct mgmt_ev_connect_failed ev;
5082
5083 bacpy(&ev.addr.bdaddr, bdaddr);
5084 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5085 ev.status = mgmt_status(status);
5086
5087 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5088 }
5089
5090 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5091 {
5092 struct mgmt_ev_pin_code_request ev;
5093
5094 bacpy(&ev.addr.bdaddr, bdaddr);
5095 ev.addr.type = BDADDR_BREDR;
5096 ev.secure = secure;
5097
5098 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5099 }
5100
5101 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5102 u8 status)
5103 {
5104 struct pending_cmd *cmd;
5105 struct mgmt_rp_pin_code_reply rp;
5106
5107 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5108 if (!cmd)
5109 return;
5110
5111 bacpy(&rp.addr.bdaddr, bdaddr);
5112 rp.addr.type = BDADDR_BREDR;
5113
5114 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5115 mgmt_status(status), &rp, sizeof(rp));
5116
5117 mgmt_pending_remove(cmd);
5118 }
5119
5120 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5121 u8 status)
5122 {
5123 struct pending_cmd *cmd;
5124 struct mgmt_rp_pin_code_reply rp;
5125
5126 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5127 if (!cmd)
5128 return;
5129
5130 bacpy(&rp.addr.bdaddr, bdaddr);
5131 rp.addr.type = BDADDR_BREDR;
5132
5133 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5134 mgmt_status(status), &rp, sizeof(rp));
5135
5136 mgmt_pending_remove(cmd);
5137 }
5138
5139 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5140 u8 link_type, u8 addr_type, __le32 value,
5141 u8 confirm_hint)
5142 {
5143 struct mgmt_ev_user_confirm_request ev;
5144
5145 BT_DBG("%s", hdev->name);
5146
5147 bacpy(&ev.addr.bdaddr, bdaddr);
5148 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5149 ev.confirm_hint = confirm_hint;
5150 ev.value = value;
5151
5152 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5153 NULL);
5154 }
5155
5156 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5157 u8 link_type, u8 addr_type)
5158 {
5159 struct mgmt_ev_user_passkey_request ev;
5160
5161 BT_DBG("%s", hdev->name);
5162
5163 bacpy(&ev.addr.bdaddr, bdaddr);
5164 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5165
5166 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5167 NULL);
5168 }
5169
/* Common completion handler for the user confirm/passkey (negative)
 * reply commands: respond to the pending command identified by opcode
 * with the translated HCI status.
 *
 * Returns -ENOENT when no such command is pending, otherwise the
 * result of cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5191
5192 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5193 u8 link_type, u8 addr_type, u8 status)
5194 {
5195 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5196 status, MGMT_OP_USER_CONFIRM_REPLY);
5197 }
5198
5199 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5200 u8 link_type, u8 addr_type, u8 status)
5201 {
5202 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5203 status,
5204 MGMT_OP_USER_CONFIRM_NEG_REPLY);
5205 }
5206
5207 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5208 u8 link_type, u8 addr_type, u8 status)
5209 {
5210 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5211 status, MGMT_OP_USER_PASSKEY_REPLY);
5212 }
5213
5214 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5215 u8 link_type, u8 addr_type, u8 status)
5216 {
5217 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5218 status,
5219 MGMT_OP_USER_PASSKEY_NEG_REPLY);
5220 }
5221
5222 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5223 u8 link_type, u8 addr_type, u32 passkey,
5224 u8 entered)
5225 {
5226 struct mgmt_ev_passkey_notify ev;
5227
5228 BT_DBG("%s", hdev->name);
5229
5230 bacpy(&ev.addr.bdaddr, bdaddr);
5231 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5232 ev.passkey = __cpu_to_le32(passkey);
5233 ev.entered = entered;
5234
5235 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5236 }
5237
5238 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5239 u8 addr_type, u8 status)
5240 {
5241 struct mgmt_ev_auth_failed ev;
5242
5243 bacpy(&ev.addr.bdaddr, bdaddr);
5244 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5245 ev.status = mgmt_status(status);
5246
5247 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5248 }
5249
/* Completion handler for HCI Write Auth Enable: sync the mgmt link
 * security setting with the controller state and respond to pending
 * Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
5278
5279 static void clear_eir(struct hci_request *req)
5280 {
5281 struct hci_dev *hdev = req->hdev;
5282 struct hci_cp_write_eir cp;
5283
5284 if (!lmp_ext_inq_capable(hdev))
5285 return;
5286
5287 memset(hdev->eir, 0, sizeof(hdev->eir));
5288
5289 memset(&cp, 0, sizeof(cp));
5290
5291 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5292 }
5293
/* Completion handler for HCI Write SSP Mode: sync the mgmt SSP (and
 * dependent High Speed) settings with the controller, respond to
 * pending Set SSP commands and refresh the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the previously set flag and
		 * tell user space about the reverted setting.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; report a change
		 * if either flag was previously set.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR data is only meaningful while SSP is enabled */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5342
/* Completion handler for enabling/disabling Secure Connections: sync
 * the mgmt SC settings with the controller and respond to pending
 * Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags and notify user
		 * space if the SC setting was previously set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot remain active without SC */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
5379
5380 static void sk_lookup(struct pending_cmd *cmd, void *data)
5381 {
5382 struct cmd_lookup *match = data;
5383
5384 if (match->sk == NULL) {
5385 match->sk = cmd->sk;
5386 sock_hold(match->sk);
5387 }
5388 }
5389
/* Completion handler for a class of device update: locate the socket
 * of the command that triggered it (if any) and, on success, broadcast
 * the new class to user space.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these commands may have triggered the class update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5406
/* Completion handler for a local name update: cache the name when it
 * originated from HCI rather than mgmt, and emit a Local Name Changed
 * event unless the write was part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command: name change came from HCI,
		 * so update the cached copy here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5433
/* Completion handler for reading local OOB data: respond to the
 * pending Read Local OOB Data command. The extended response carrying
 * the 256-bit values is used when Secure Connections is enabled and
 * the controller provided them.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy response with only the 192-bit values */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5480
/* Report a found remote device to user space during active discovery,
 * resolving random addresses to identity addresses when an IRK is
 * known and appending EIR data plus (optionally) the class of device.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Events are only reported while discovery is active */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address when the RPA can be resolved */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5526
5527 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5528 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5529 {
5530 struct mgmt_ev_device_found *ev;
5531 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5532 u16 eir_len;
5533
5534 ev = (struct mgmt_ev_device_found *) buf;
5535
5536 memset(buf, 0, sizeof(buf));
5537
5538 bacpy(&ev->addr.bdaddr, bdaddr);
5539 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5540 ev->rssi = rssi;
5541
5542 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5543 name_len);
5544
5545 ev->eir_len = cpu_to_le16(eir_len);
5546
5547 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5548 }
5549
/* Notify user space of a discovery state change and complete any
 * pending Start/Stop Discovery command.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		/* The discovery type is the command's response parameter */
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5576
5577 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5578 {
5579 struct pending_cmd *cmd;
5580 struct mgmt_ev_device_blocked ev;
5581
5582 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5583
5584 bacpy(&ev.addr.bdaddr, bdaddr);
5585 ev.addr.type = type;
5586
5587 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5588 cmd ? cmd->sk : NULL);
5589 }
5590
5591 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5592 {
5593 struct pending_cmd *cmd;
5594 struct mgmt_ev_device_unblocked ev;
5595
5596 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5597
5598 bacpy(&ev.addr.bdaddr, bdaddr);
5599 ev.addr.type = type;
5600
5601 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5602 cmd ? cmd->sk : NULL);
5603 }
5604
5605 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5606 {
5607 BT_DBG("%s status %u", hdev->name, status);
5608
5609 /* Clear the advertising mgmt setting if we failed to re-enable it */
5610 if (status) {
5611 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5612 new_settings(hdev, NULL);
5613 }
5614 }
5615
/* Re-enable advertising once the last LE connection has dropped, if
 * the advertising mgmt setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising is only resumed when no LE connections remain */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}