/* net/bluetooth/mgmt.c — from mirror_ubuntu-bionic-kernel.git
 * (git web-view header removed)
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management opcodes this implementation handles; the list is reported
 * verbatim to user space by the Read Management Supported Commands
 * command (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
};
94
/* Management events this implementation can emit; reported to user
 * space alongside mgmt_commands by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
};
125
126 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
127
128 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
129 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
130
/* A mgmt command whose reply has not been sent yet.
 *
 * Entries are linked on hdev->mgmt_pending (see mgmt_pending_add())
 * and hold a reference on the originating socket until they are
 * released by mgmt_pending_free().
 */
struct pending_cmd {
	struct list_head list;	/* membership in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode of the pending command */
	int index;		/* controller index (copied from hdev->id) */
	void *param;		/* kmalloc'd copy of the command parameters */
	struct sock *sk;	/* originating socket, held via sock_hold() */
	void *user_data;	/* opaque per-command context */
};
139
140 /* HCI to MGMT error code conversion table */
141 static u8 mgmt_status_table[] = {
142 MGMT_STATUS_SUCCESS,
143 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
144 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
145 MGMT_STATUS_FAILED, /* Hardware Failure */
146 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
147 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
148 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
149 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
150 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
151 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
153 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
154 MGMT_STATUS_BUSY, /* Command Disallowed */
155 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
156 MGMT_STATUS_REJECTED, /* Rejected Security */
157 MGMT_STATUS_REJECTED, /* Rejected Personal */
158 MGMT_STATUS_TIMEOUT, /* Host Timeout */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
160 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
161 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
162 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
163 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
164 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
165 MGMT_STATUS_BUSY, /* Repeated Attempts */
166 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
167 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
169 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
170 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
171 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
172 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
173 MGMT_STATUS_FAILED, /* Unspecified Error */
174 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
175 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
176 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
177 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
178 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
179 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
180 MGMT_STATUS_FAILED, /* Unit Link Key Used */
181 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
182 MGMT_STATUS_TIMEOUT, /* Instant Passed */
183 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
184 MGMT_STATUS_FAILED, /* Transaction Collision */
185 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
186 MGMT_STATUS_REJECTED, /* QoS Rejected */
187 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
188 MGMT_STATUS_REJECTED, /* Insufficient Security */
189 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
190 MGMT_STATUS_BUSY, /* Role Switch Pending */
191 MGMT_STATUS_FAILED, /* Slot Violation */
192 MGMT_STATUS_FAILED, /* Role Switch Failed */
193 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
194 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
195 MGMT_STATUS_BUSY, /* Host Busy Pairing */
196 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
197 MGMT_STATUS_BUSY, /* Controller Busy */
198 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
199 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
200 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
201 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
202 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
203 };
204
205 static u8 mgmt_status(u8 hci_status)
206 {
207 if (hci_status < ARRAY_SIZE(mgmt_status_table))
208 return mgmt_status_table[hci_status];
209
210 return MGMT_STATUS_FAILED;
211 }
212
213 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
214 {
215 struct sk_buff *skb;
216 struct mgmt_hdr *hdr;
217 struct mgmt_ev_cmd_status *ev;
218 int err;
219
220 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
221
222 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
223 if (!skb)
224 return -ENOMEM;
225
226 hdr = (void *) skb_put(skb, sizeof(*hdr));
227
228 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
229 hdr->index = cpu_to_le16(index);
230 hdr->len = cpu_to_le16(sizeof(*ev));
231
232 ev = (void *) skb_put(skb, sizeof(*ev));
233 ev->status = status;
234 ev->opcode = cpu_to_le16(cmd);
235
236 err = sock_queue_rcv_skb(sk, skb);
237 if (err < 0)
238 kfree_skb(skb);
239
240 return err;
241 }
242
243 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
244 void *rp, size_t rp_len)
245 {
246 struct sk_buff *skb;
247 struct mgmt_hdr *hdr;
248 struct mgmt_ev_cmd_complete *ev;
249 int err;
250
251 BT_DBG("sock %p", sk);
252
253 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
254 if (!skb)
255 return -ENOMEM;
256
257 hdr = (void *) skb_put(skb, sizeof(*hdr));
258
259 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
260 hdr->index = cpu_to_le16(index);
261 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
262
263 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
264 ev->opcode = cpu_to_le16(cmd);
265 ev->status = status;
266
267 if (rp)
268 memcpy(ev->data, rp, rp_len);
269
270 err = sock_queue_rcv_skb(sk, skb);
271 if (err < 0)
272 kfree_skb(skb);
273
274 return err;
275 }
276
277 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
278 u16 data_len)
279 {
280 struct mgmt_rp_read_version rp;
281
282 BT_DBG("sock %p", sk);
283
284 rp.version = MGMT_VERSION;
285 rp.revision = cpu_to_le16(MGMT_REVISION);
286
287 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
288 sizeof(rp));
289 }
290
291 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
292 u16 data_len)
293 {
294 struct mgmt_rp_read_commands *rp;
295 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
296 const u16 num_events = ARRAY_SIZE(mgmt_events);
297 __le16 *opcode;
298 size_t rp_size;
299 int i, err;
300
301 BT_DBG("sock %p", sk);
302
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
304
305 rp = kmalloc(rp_size, GFP_KERNEL);
306 if (!rp)
307 return -ENOMEM;
308
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
311
312 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
313 put_unaligned_le16(mgmt_commands[i], opcode);
314
315 for (i = 0; i < num_events; i++, opcode++)
316 put_unaligned_le16(mgmt_events[i], opcode);
317
318 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
319 rp_size);
320 kfree(rp);
321
322 return err;
323 }
324
/* Handle the Read Controller Index List command: report the indexes of
 * all configured BR/EDR controllers.
 *
 * Two passes over hci_dev_list are made under the read lock: the first
 * computes an upper bound on the number of entries so the reply can be
 * sized, the second fills it in while applying additional filters
 * (HCI_SETUP, HCI_USER_CHANNEL, raw-only), so the final count may be
 * smaller than the allocation — rp_len is recomputed accordingly.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound of configured BR/EDR controllers. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that should be visible. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute reply length from the actual (possibly smaller) count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
383
/* Handle the Read Unconfigured Controller Index List command: same
 * two-pass scheme as read_index_list(), but selecting BR/EDR
 * controllers that still have HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound of unconfigured BR/EDR controllers. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, skipping hidden controllers. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute reply length from the actual (possibly smaller) count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
442
443 static u32 get_supported_settings(struct hci_dev *hdev)
444 {
445 u32 settings = 0;
446
447 settings |= MGMT_SETTING_POWERED;
448 settings |= MGMT_SETTING_PAIRABLE;
449 settings |= MGMT_SETTING_DEBUG_KEYS;
450
451 if (lmp_bredr_capable(hdev)) {
452 settings |= MGMT_SETTING_CONNECTABLE;
453 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
454 settings |= MGMT_SETTING_FAST_CONNECTABLE;
455 settings |= MGMT_SETTING_DISCOVERABLE;
456 settings |= MGMT_SETTING_BREDR;
457 settings |= MGMT_SETTING_LINK_SECURITY;
458
459 if (lmp_ssp_capable(hdev)) {
460 settings |= MGMT_SETTING_SSP;
461 settings |= MGMT_SETTING_HS;
462 }
463
464 if (lmp_sc_capable(hdev) ||
465 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
466 settings |= MGMT_SETTING_SECURE_CONN;
467 }
468
469 if (lmp_le_capable(hdev)) {
470 settings |= MGMT_SETTING_LE;
471 settings |= MGMT_SETTING_ADVERTISING;
472 settings |= MGMT_SETTING_PRIVACY;
473 }
474
475 return settings;
476 }
477
478 static u32 get_current_settings(struct hci_dev *hdev)
479 {
480 u32 settings = 0;
481
482 if (hdev_is_powered(hdev))
483 settings |= MGMT_SETTING_POWERED;
484
485 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
486 settings |= MGMT_SETTING_CONNECTABLE;
487
488 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
489 settings |= MGMT_SETTING_FAST_CONNECTABLE;
490
491 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
492 settings |= MGMT_SETTING_DISCOVERABLE;
493
494 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
495 settings |= MGMT_SETTING_PAIRABLE;
496
497 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
498 settings |= MGMT_SETTING_BREDR;
499
500 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
501 settings |= MGMT_SETTING_LE;
502
503 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
504 settings |= MGMT_SETTING_LINK_SECURITY;
505
506 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
507 settings |= MGMT_SETTING_SSP;
508
509 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
510 settings |= MGMT_SETTING_HS;
511
512 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
513 settings |= MGMT_SETTING_ADVERTISING;
514
515 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
516 settings |= MGMT_SETTING_SECURE_CONN;
517
518 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
519 settings |= MGMT_SETTING_DEBUG_KEYS;
520
521 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
522 settings |= MGMT_SETTING_PRIVACY;
523
524 return settings;
525 }
526
527 #define PNP_INFO_SVCLASS_ID 0x1200
528
/* Append an EIR structure listing the device's 16-bit service UUIDs to
 * @data, writing at most @len bytes. PnP Information and reserved
 * (< 0x1100) UUIDs are skipped. If not all UUIDs fit, the structure
 * type is downgraded from "complete" to "some". Returns the pointer
 * one past the last byte written (== @data if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives in bytes 12-13 of the 128-bit form. */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the structure header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
570
/* Append an EIR structure listing the device's 32-bit service UUIDs to
 * @data, writing at most @len bytes. Same truncation protocol as
 * create_uuid16_list(). Returns the pointer one past the last byte
 * written (== @data if nothing was written).
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the structure header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit alias lives in bytes 12-15 of the 128-bit form. */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
603
/* Append an EIR structure listing the device's 128-bit service UUIDs
 * to @data, writing at most @len bytes. Same truncation protocol as
 * create_uuid16_list(). Returns the pointer one past the last byte
 * written (== @data if nothing was written).
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 16-byte UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the structure header on the first match. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
636
637 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
638 {
639 struct pending_cmd *cmd;
640
641 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
642 if (cmd->opcode == opcode)
643 return cmd;
644 }
645
646 return NULL;
647 }
648
649 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
650 struct hci_dev *hdev,
651 const void *data)
652 {
653 struct pending_cmd *cmd;
654
655 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
656 if (cmd->user_data != data)
657 continue;
658 if (cmd->opcode == opcode)
659 return cmd;
660 }
661
662 return NULL;
663 }
664
665 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
666 {
667 u8 ad_len = 0;
668 size_t name_len;
669
670 name_len = strlen(hdev->dev_name);
671 if (name_len > 0) {
672 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
673
674 if (name_len > max_len) {
675 name_len = max_len;
676 ptr[1] = EIR_NAME_SHORT;
677 } else
678 ptr[1] = EIR_NAME_COMPLETE;
679
680 ptr[0] = name_len + 1;
681
682 memcpy(ptr + 2, hdev->dev_name, name_len);
683
684 ad_len += (name_len + 2);
685 ptr += (name_len + 2);
686 }
687
688 return ad_len;
689 }
690
/* Queue an HCI LE Set Scan Response Data command on @req if the data
 * would actually change. No-op when LE is disabled or the newly built
 * data matches what the controller already has.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed. */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for the next comparison. */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
715
716 static u8 get_adv_discov_flags(struct hci_dev *hdev)
717 {
718 struct pending_cmd *cmd;
719
720 /* If there's a pending mgmt command the flags will not yet have
721 * their final values, so check for this first.
722 */
723 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
724 if (cmd) {
725 struct mgmt_mode *cp = cmd->param;
726 if (cp->val == 0x01)
727 return LE_AD_GENERAL;
728 else if (cp->val == 0x02)
729 return LE_AD_LIMITED;
730 } else {
731 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
732 return LE_AD_LIMITED;
733 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
734 return LE_AD_GENERAL;
735 }
736
737 return 0;
738 }
739
740 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
741 {
742 u8 ad_len = 0, flags = 0;
743
744 flags |= get_adv_discov_flags(hdev);
745
746 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
747 flags |= LE_AD_NO_BREDR;
748
749 if (flags) {
750 BT_DBG("adv flags 0x%02x", flags);
751
752 ptr[0] = 2;
753 ptr[1] = EIR_FLAGS;
754 ptr[2] = flags;
755
756 ad_len += 3;
757 ptr += 3;
758 }
759
760 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
761 ptr[0] = 2;
762 ptr[1] = EIR_TX_POWER;
763 ptr[2] = (u8) hdev->adv_tx_power;
764
765 ad_len += 3;
766 ptr += 3;
767 }
768
769 return ad_len;
770 }
771
/* Queue an HCI LE Set Advertising Data command on @req if the data
 * would actually change. No-op when LE is disabled or the newly built
 * data matches what the controller already has.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for the next comparison. */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
796
/* Build extended inquiry response data into @data (assumed to be a
 * zeroed buffer of HCI_MAX_EIR_LENGTH bytes): local name, inquiry TX
 * power, Device ID, then 16/32/128-bit service UUID lists in whatever
 * space remains.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		/* TX Power structure: length, type, signed power level. */
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID structure: source, vendor, product, version. */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space is left in the EIR buffer. */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
844
/* Queue an HCI Write Extended Inquiry Response command on @req if the
 * EIR data would actually change. No-op when the controller is off,
 * lacks extended inquiry support, has SSP disabled, or the service
 * cache is still active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
873
874 static u8 get_service_classes(struct hci_dev *hdev)
875 {
876 struct bt_uuid *uuid;
877 u8 val = 0;
878
879 list_for_each_entry(uuid, &hdev->uuids, list)
880 val |= uuid->svc_hint;
881
882 return val;
883 }
884
885 static void update_class(struct hci_request *req)
886 {
887 struct hci_dev *hdev = req->hdev;
888 u8 cod[3];
889
890 BT_DBG("%s", hdev->name);
891
892 if (!hdev_is_powered(hdev))
893 return;
894
895 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
896 return;
897
898 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
899 return;
900
901 cod[0] = hdev->minor_class;
902 cod[1] = hdev->major_class;
903 cod[2] = get_service_classes(hdev);
904
905 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
906 cod[1] |= 0x20;
907
908 if (memcmp(cod, hdev->dev_class, 3) == 0)
909 return;
910
911 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
912 }
913
914 static bool get_connectable(struct hci_dev *hdev)
915 {
916 struct pending_cmd *cmd;
917
918 /* If there's a pending mgmt command the flag will not yet have
919 * it's final value, so check for this first.
920 */
921 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
922 if (cmd) {
923 struct mgmt_mode *cp = cmd->param;
924 return cp->val;
925 }
926
927 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
928 }
929
/* Queue the HCI commands that (re)enable LE advertising: set the
 * advertising parameters and then turn advertising on. Aborts without
 * queueing anything if a suitable own address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 * 0.625ms = 1.28s advertising interval. */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	/* Connectable undirected vs. non-connectable advertising. */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
964
965 static void disable_advertising(struct hci_request *req)
966 {
967 u8 enable = 0x00;
968
969 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
970 }
971
/* Delayed-work handler that expires the service cache: once the cache
 * window ends, push the real EIR data and Class of Device to the
 * controller. Runs at most once per cache activation thanks to the
 * test_and_clear.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the queued commands outside the hdev lock. */
	hci_req_run(&req, NULL);
}
992
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA as expired and, if we are advertising with no LE connections up,
 * bounce advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing more to do unless we are advertising; with LE links
	 * up the address cannot be changed anyway.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
1018
/* One-time initialization when a controller first comes under mgmt
 * control: set up the delayed works and clear the pairable flag. The
 * test_and_set on HCI_MGMT makes repeat calls no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
1034
/* Handle the Read Controller Information command: snapshot the
 * controller's address, version, settings, class and names under the
 * hdev lock and reply with a Command Complete.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1064
1065 static void mgmt_pending_free(struct pending_cmd *cmd)
1066 {
1067 sock_put(cmd->sk);
1068 kfree(cmd->param);
1069 kfree(cmd);
1070 }
1071
/* Create a pending-command entry for @opcode on @hdev, copying @len
 * bytes of @data (if non-NULL) as its parameters, and link it onto
 * hdev->mgmt_pending. Takes a reference on @sk that is released by
 * mgmt_pending_free(). Returns NULL on allocation failure.
 *
 * NOTE(review): when @data is NULL the param buffer is left
 * uninitialized — callers are expected to fill it or not read it.
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1101
1102 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1103 void (*cb)(struct pending_cmd *cmd,
1104 void *data),
1105 void *data)
1106 {
1107 struct pending_cmd *cmd, *tmp;
1108
1109 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1110 if (opcode > 0 && cmd->opcode != opcode)
1111 continue;
1112
1113 cb(cmd, data);
1114 }
1115 }
1116
/* Unlink a pending command from hdev->mgmt_pending and release it.
 * Must unlink before freeing — the list head lives inside @cmd.
 */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1122
1123 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1124 {
1125 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1126
1127 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1128 sizeof(settings));
1129 }
1130
/* Completion callback for clean_up_hci_state(): once all connections
 * are gone, run the power-off work immediately instead of waiting for
 * the delayed timer.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1140
/* Queue the HCI commands needed to stop whatever discovery activity is
 * currently in progress, based on the discovery state machine:
 * inquiry/LE scan while finding, name-request cancel while resolving,
 * and passive LE scan otherwise.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop the auto-disable timer too. */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No outstanding name request — nothing to cancel. */
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1177
/* Queue and submit the HCI commands that quiesce the controller before
 * powering off: disable page/inquiry scan and advertising, stop any
 * discovery, and disconnect/cancel/reject every connection according
 * to its state. clean_up_hci_complete() runs when the request
 * finishes. Returns the hci_req_run() result.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	/* Turn off page and/or inquiry scan if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links are disconnected cleanly. */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempts are cancelled. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming requests are rejected. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1230
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * Except for parameter/busy errors and the already-in-requested-state
 * case, the response is deferred: a pending command is registered and
 * answered from the power on/off completion paths.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Mode must be a strict boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power change may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was only up because of the auto-off grace
	 * period, powering on amounts to cancelling the scheduled
	 * auto-off and reporting the device as powered.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already active: just return settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1296
/* Build a mgmt event skb and broadcast it to all management sockets,
 * except @skip_sk (typically the originator of a command, which gets
 * its own response instead).
 *
 * @hdev may be NULL for events not tied to a controller, in which
 * case the index is set to MGMT_INDEX_NONE. @data may be NULL when
 * @data_len is 0.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1326
/* Broadcast a New Settings event with the current settings of @hdev
 * to all management sockets except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
1335
/* Context handed to mgmt_pending_foreach() callbacks. @sk collects
 * the first responding socket (callbacks take a reference) so the
 * caller can skip it when broadcasting a follow-up event;
 * @mgmt_status carries an optional status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1341
/* mgmt_pending_foreach() callback: answer a pending settings-style
 * command with the current settings, remember the first socket seen
 * in the cmd_lookup (taking a reference the caller must sock_put()),
 * and free the pending entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1357
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status passed via @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1365
1366 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1367 {
1368 if (!lmp_bredr_capable(hdev))
1369 return MGMT_STATUS_NOT_SUPPORTED;
1370 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1371 return MGMT_STATUS_REJECTED;
1372 else
1373 return MGMT_STATUS_SUCCESS;
1374 }
1375
1376 static u8 mgmt_le_support(struct hci_dev *hdev)
1377 {
1378 if (!lmp_le_capable(hdev))
1379 return MGMT_STATUS_NOT_SUPPORTED;
1380 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1381 return MGMT_STATUS_REJECTED;
1382 else
1383 return MGMT_STATUS_SUCCESS;
1384 }
1385
/* Completion handler for the Set Discoverable HCI request: update the
 * HCI_DISCOVERABLE flag to match the request, arm the discoverable
 * timeout if one was configured, answer the pending command and, if
 * the setting actually changed, broadcast New Settings.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* On HCI failure report the error and make sure the
		 * limited-discoverable flag set by set_discoverable()
		 * does not linger.
		 */
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverability back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1442
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * cp->val: 0x00 = not discoverable, 0x01 = general discoverable,
 * 0x02 = limited discoverable; cp->timeout is the duration in
 * seconds after which discoverability is automatically disabled.
 *
 * When the controller is powered, the change is carried out via an
 * HCI request and the response is deferred to
 * set_discoverable_complete(); when powered off (or when only the
 * timeout changes) the stored flags are updated directly.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share the scan-enable
	 * state, so neither may already be in progress.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverability requires connectability */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		/* The IAC LAPs below are the little-endian encodings
		 * of LIAC (0x9e8b00) and GIAC (0x9e8b33).
		 */
		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1607
/* Append page-scan parameter updates to @req. Fast connectable uses
 * interlaced page scan with a short interval so incoming connections
 * are picked up quickly; disabling it restores the standard type and
 * default interval. Commands are only queued when the controller's
 * current parameters actually differ. No-op unless BR/EDR is enabled
 * and the controller is at least Bluetooth 1.2 (which introduced
 * these commands).
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	/* page scan window: 0x0012 (11.25 msec in 0.625 msec slots) */
	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1642
/* Completion handler for the Set Connectable HCI request: sync the
 * HCI_CONNECTABLE flag with the requested value, answer the pending
 * command and broadcast New Settings if the setting changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1680
1681 static int set_connectable_update_settings(struct hci_dev *hdev,
1682 struct sock *sk, u8 val)
1683 {
1684 bool changed = false;
1685 int err;
1686
1687 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1688 changed = true;
1689
1690 if (val) {
1691 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1692 } else {
1693 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1694 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1695 }
1696
1697 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1698 if (err < 0)
1699 return err;
1700
1701 if (changed)
1702 return new_settings(hdev, sk);
1703
1704 return 0;
1705 }
1706
/* Handler for MGMT_OP_SET_CONNECTABLE: enable or disable incoming
 * connections (page scan for BR/EDR, connectable advertising flags
 * for LE-only controllers).
 *
 * When powered off everything is handled host-side via
 * set_connectable_update_settings(); otherwise an HCI request is
 * built and the response deferred to set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: no HCI traffic needed, update flags only */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Connectable and discoverable changes share scan-enable
	 * state, so neither may already be in progress.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable implies not being
			 * discoverable either, so stop any pending
			 * discoverable timeout.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so its parameters reflect the new
	 * connectable state, but only while no LE links exist.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing needed sending; finish host-side */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1801
1802 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1803 u16 len)
1804 {
1805 struct mgmt_mode *cp = data;
1806 bool changed;
1807 int err;
1808
1809 BT_DBG("request for %s", hdev->name);
1810
1811 if (cp->val != 0x00 && cp->val != 0x01)
1812 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1813 MGMT_STATUS_INVALID_PARAMS);
1814
1815 hci_dev_lock(hdev);
1816
1817 if (cp->val)
1818 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1819 else
1820 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1821
1822 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1823 if (err < 0)
1824 goto unlock;
1825
1826 if (changed)
1827 err = new_settings(hdev, sk);
1828
1829 unlock:
1830 hci_dev_unlock(hdev);
1831 return err;
1832 }
1833
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR
 * link-level authentication (HCI Write Authentication Enable).
 *
 * When powered off only the host flag is toggled; when powered the
 * HCI command is sent and the response deferred via a pending
 * command (answered from the command-complete path).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the stored flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no-op */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1903
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple
 * Pairing. Disabling SSP also disables High Speed (HS depends on
 * SSP) and, if SSP debug keys were in use, turns debug mode off.
 *
 * When powered the change is sent to the controller and the response
 * is deferred via a pending command; when powered off only the host
 * flags are updated.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* HS requires SSP, so clear it too; "changed"
			 * must be true if either flag was set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: no HCI traffic needed */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP while debug keys are enabled also disables
	 * SSP debug mode (cp->val is 0x00 here, matching "disabled").
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1985
/* Handler for MGMT_OP_SET_HS: toggle the host-side High Speed flag.
 * Requires BR/EDR, SSP capability and SSP being enabled. Disabling
 * HS is rejected while the controller is powered. No HCI traffic is
 * generated; only host flags and settings events are involved.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2036
/* Completion handler for the Set LE HCI request: answer all pending
 * Set LE commands (with an error status on failure), broadcast New
 * Settings, and refresh advertising/scan-response data when LE ended
 * up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* settings_rsp stashes the first responder in match.sk with a
	 * reference, so it can be skipped in the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2074
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support
 * on a dual-mode controller (HCI Write LE Host Supported).
 *
 * When powered off, or when the controller's host-LE state already
 * matches, only host flags are updated; otherwise the HCI command is
 * issued and the response deferred to le_enable_complete(). Turning
 * LE off also stops advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No controller interaction needed: update host flags only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Turning LE off: stop advertising first */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2163
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* Any of these opcodes may result in CoD/EIR updates */
	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2186
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. 16-bit and 32-bit UUIDs differ from it
 * only in the last four bytes, which get_uuid_size() exploits.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2191
2192 static u8 get_uuid_size(const u8 *uuid)
2193 {
2194 u32 val;
2195
2196 if (memcmp(uuid, bluetooth_base_uuid, 12))
2197 return 128;
2198
2199 val = get_unaligned_le32(&uuid[12]);
2200 if (val > 0xffff)
2201 return 32;
2202
2203 return 16;
2204 }
2205
/* Common completion for class/EIR-affecting commands: answer the
 * pending @mgmt_op with the (possibly updated) 3-byte device class
 * and drop the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2224
/* hci_req_run() callback for Add UUID: finish the pending command */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2231
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and refresh
 * the class of device and EIR data accordingly.
 *
 * If the resulting HCI request queues commands, the response is
 * deferred to add_uuid_complete(); if nothing needed sending
 * (-ENODATA) the command completes immediately with the current
 * device class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI traffic needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2289
2290 static bool enable_service_cache(struct hci_dev *hdev)
2291 {
2292 if (!hdev_is_powered(hdev))
2293 return false;
2294
2295 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2296 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2297 CACHE_TIMEOUT);
2298 return true;
2299 }
2300
2301 return false;
2302 }
2303
/* hci_req_run() callback for Remove UUID: finish the pending command */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2310
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID - or all
 * of them when the all-zero wildcard UUID is given - and refresh
 * class of device and EIR data.
 *
 * For the wildcard case, if the service cache could be (re)armed the
 * actual HCI update is deferred to the cache timeout and the command
 * completes immediately. Otherwise the response is deferred to
 * remove_uuid_complete(), or completes immediately when no HCI
 * commands were needed (-ENODATA).
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: clear the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI traffic needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2388
/* hci_req_run() callback for Set Device Class: finish the pending
 * command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2395
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor device
 * class. The two low bits of minor and the three high bits of major
 * are reserved and must be zero. When powered, the new class (and
 * possibly EIR) is written to the controller and the response is
 * deferred to set_class_complete(); otherwise it completes
 * immediately with the stored class.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits must not be set */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the sync
		 * cancel, presumably because the service_cache work
		 * item itself takes hci_dev_lock - confirm before
		 * changing this ordering.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI traffic needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2466
2467 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2468 u16 len)
2469 {
2470 struct mgmt_cp_load_link_keys *cp = data;
2471 u16 key_count, expected_len;
2472 bool changed;
2473 int i;
2474
2475 BT_DBG("request for %s", hdev->name);
2476
2477 if (!lmp_bredr_capable(hdev))
2478 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2479 MGMT_STATUS_NOT_SUPPORTED);
2480
2481 key_count = __le16_to_cpu(cp->key_count);
2482
2483 expected_len = sizeof(*cp) + key_count *
2484 sizeof(struct mgmt_link_key_info);
2485 if (expected_len != len) {
2486 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2487 expected_len, len);
2488 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2489 MGMT_STATUS_INVALID_PARAMS);
2490 }
2491
2492 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2493 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2494 MGMT_STATUS_INVALID_PARAMS);
2495
2496 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2497 key_count);
2498
2499 for (i = 0; i < key_count; i++) {
2500 struct mgmt_link_key_info *key = &cp->keys[i];
2501
2502 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2503 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2504 MGMT_STATUS_INVALID_PARAMS);
2505 }
2506
2507 hci_dev_lock(hdev);
2508
2509 hci_link_keys_clear(hdev);
2510
2511 if (cp->debug_keys)
2512 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2513 &hdev->dev_flags);
2514 else
2515 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2516 &hdev->dev_flags);
2517
2518 if (changed)
2519 new_settings(hdev, NULL);
2520
2521 for (i = 0; i < key_count; i++) {
2522 struct mgmt_link_key_info *key = &cp->keys[i];
2523
2524 /* Always ignore debug keys and require a new pairing if
2525 * the user wants to use them.
2526 */
2527 if (key->type == HCI_LK_DEBUG_COMBINATION)
2528 continue;
2529
2530 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2531 key->type, key->pin_len, NULL);
2532 }
2533
2534 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2535
2536 hci_dev_unlock(hdev);
2537
2538 return 0;
2539 }
2540
2541 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2542 u8 addr_type, struct sock *skip_sk)
2543 {
2544 struct mgmt_ev_device_unpaired ev;
2545
2546 bacpy(&ev.addr.bdaddr, bdaddr);
2547 ev.addr.type = addr_type;
2548
2549 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2550 skip_sk);
2551 }
2552
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete all pairing material for
 * the given address (link key for BR/EDR; IRK, LTK and connection
 * parameters for LE) and optionally disconnect the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		/* Map mgmt LE address type to HCI address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* The LTK removal result decides whether the device was
		 * considered paired at all.
		 */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look up an active connection only if disconnect was requested */
	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* No connection to tear down - the operation is complete */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2643
/* Handler for MGMT_OP_DISCONNECT: terminate an existing ACL or LE
 * connection to the given address. The reply is deferred until the
 * disconnection actually completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not established yet/anymore */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2708
2709 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2710 {
2711 switch (link_type) {
2712 case LE_LINK:
2713 switch (addr_type) {
2714 case ADDR_LE_DEV_PUBLIC:
2715 return BDADDR_LE_PUBLIC;
2716
2717 default:
2718 /* Fallback to LE Random address type */
2719 return BDADDR_LE_RANDOM;
2720 }
2721
2722 default:
2723 /* Fallback to BR/EDR type */
2724 return BDADDR_BREDR;
2725 }
2726 }
2727
2728 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2729 u16 data_len)
2730 {
2731 struct mgmt_rp_get_connections *rp;
2732 struct hci_conn *c;
2733 size_t rp_len;
2734 int err;
2735 u16 i;
2736
2737 BT_DBG("");
2738
2739 hci_dev_lock(hdev);
2740
2741 if (!hdev_is_powered(hdev)) {
2742 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2743 MGMT_STATUS_NOT_POWERED);
2744 goto unlock;
2745 }
2746
2747 i = 0;
2748 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2749 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2750 i++;
2751 }
2752
2753 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2754 rp = kmalloc(rp_len, GFP_KERNEL);
2755 if (!rp) {
2756 err = -ENOMEM;
2757 goto unlock;
2758 }
2759
2760 i = 0;
2761 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2762 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2763 continue;
2764 bacpy(&rp->addr[i].bdaddr, &c->dst);
2765 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2766 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2767 continue;
2768 i++;
2769 }
2770
2771 rp->conn_count = cpu_to_le16(i);
2772
2773 /* Recalculate length in case of filtered SCO connections, etc */
2774 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2775
2776 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2777 rp_len);
2778
2779 kfree(rp);
2780
2781 unlock:
2782 hci_dev_unlock(hdev);
2783 return err;
2784 }
2785
/* Queue a pending PIN_CODE_NEG_REPLY command and send the matching HCI
 * PIN Code Negative Reply. The pending entry is removed again if the
 * HCI command could not be sent.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command only carries the remote device address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2804
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a userspace-provided PIN
 * code to the controller for an ongoing BR/EDR pairing. A high security
 * level demands a full 16-byte PIN; anything shorter is converted into
 * a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject the pairing
	 * with a negative reply instead of sending a weaker PIN.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2864
2865 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2866 u16 len)
2867 {
2868 struct mgmt_cp_set_io_capability *cp = data;
2869
2870 BT_DBG("");
2871
2872 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2873 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2874 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2875
2876 hci_dev_lock(hdev);
2877
2878 hdev->io_capability = cp->io_capability;
2879
2880 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2881 hdev->io_capability);
2882
2883 hci_dev_unlock(hdev);
2884
2885 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2886 0);
2887 }
2888
2889 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2890 {
2891 struct hci_dev *hdev = conn->hdev;
2892 struct pending_cmd *cmd;
2893
2894 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2895 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2896 continue;
2897
2898 if (cmd->user_data != conn)
2899 continue;
2900
2901 return cmd;
2902 }
2903
2904 return NULL;
2905 }
2906
/* Finish a PAIR_DEVICE command: send the reply to userspace, detach
 * the connection callbacks, drop the connection reference taken when
 * pairing started and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* cmd (and its conn pointer) must not be used after this */
	mgmt_pending_remove(cmd);
}
2927
2928 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2929 {
2930 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2931 struct pending_cmd *cmd;
2932
2933 cmd = find_pairing(conn);
2934 if (cmd)
2935 pairing_complete(cmd, status);
2936 }
2937
2938 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2939 {
2940 struct pending_cmd *cmd;
2941
2942 BT_DBG("status %u", status);
2943
2944 cmd = find_pairing(conn);
2945 if (!cmd)
2946 BT_DBG("Unable to find a pending command");
2947 else
2948 pairing_complete(cmd, mgmt_status(status));
2949 }
2950
2951 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2952 {
2953 struct pending_cmd *cmd;
2954
2955 BT_DBG("status %u", status);
2956
2957 if (!status)
2958 return;
2959
2960 cmd = find_pairing(conn);
2961 if (!cmd)
2962 BT_DBG("Unable to find a pending command");
2963 else
2964 pairing_complete(cmd, mgmt_status(status));
2965 }
2966
/* Handler for MGMT_OP_PAIR_DEVICE: initiate a BR/EDR or LE connection
 * with dedicated bonding. The reply is deferred until the connection
 * callbacks (or SMP for LE) report the pairing result.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connection with callbacks already set belongs to an earlier
	 * pairing attempt; drop the extra reference and report busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* An already-connected and secured link completes immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3084
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending
 * PAIR_DEVICE command for the given address with a CANCELLED status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must target the same device */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* This also frees cmd and drops the connection reference */
	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3126
/* Common implementation for the user confirmation/passkey (negative)
 * reply commands. LE pairing replies are routed directly to SMP;
 * BR/EDR replies are forwarded to the controller via the given HCI
 * opcode (passkey is only used with HCI_OP_USER_PASSKEY_REPLY).
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE replies go straight to SMP and complete immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3194
3195 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3196 void *data, u16 len)
3197 {
3198 struct mgmt_cp_pin_code_neg_reply *cp = data;
3199
3200 BT_DBG("");
3201
3202 return user_pairing_resp(sk, hdev, &cp->addr,
3203 MGMT_OP_PIN_CODE_NEG_REPLY,
3204 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3205 }
3206
3207 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3208 u16 len)
3209 {
3210 struct mgmt_cp_user_confirm_reply *cp = data;
3211
3212 BT_DBG("");
3213
3214 if (len != sizeof(*cp))
3215 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3216 MGMT_STATUS_INVALID_PARAMS);
3217
3218 return user_pairing_resp(sk, hdev, &cp->addr,
3219 MGMT_OP_USER_CONFIRM_REPLY,
3220 HCI_OP_USER_CONFIRM_REPLY, 0);
3221 }
3222
3223 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3224 void *data, u16 len)
3225 {
3226 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3227
3228 BT_DBG("");
3229
3230 return user_pairing_resp(sk, hdev, &cp->addr,
3231 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3232 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3233 }
3234
3235 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3236 u16 len)
3237 {
3238 struct mgmt_cp_user_passkey_reply *cp = data;
3239
3240 BT_DBG("");
3241
3242 return user_pairing_resp(sk, hdev, &cp->addr,
3243 MGMT_OP_USER_PASSKEY_REPLY,
3244 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3245 }
3246
3247 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3248 void *data, u16 len)
3249 {
3250 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3251
3252 BT_DBG("");
3253
3254 return user_pairing_resp(sk, hdev, &cp->addr,
3255 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3256 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3257 }
3258
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3268
/* HCI request callback for set_local_name(): resolve the pending
 * SET_LOCAL_NAME command according to the controller status.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. power off) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3296
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name. When powered, the new name is pushed to the controller (local
 * name, EIR and LE scan response data as applicable).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right
	 * away regardless of power state.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets of the name change */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Reply is deferred until set_name_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3365
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its
 * local out-of-band pairing data. With Secure Connections enabled the
 * extended variant (192- and 256-bit values) is requested.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3413
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing
 * data received from a remote device. The command length selects
 * between the legacy (192-bit only) and extended (192- plus 256-bit)
 * payload formats.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known payload size - reject the command */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3461
3462 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3463 void *data, u16 len)
3464 {
3465 struct mgmt_cp_remove_remote_oob_data *cp = data;
3466 u8 status;
3467 int err;
3468
3469 BT_DBG("%s", hdev->name);
3470
3471 hci_dev_lock(hdev);
3472
3473 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3474 if (err < 0)
3475 status = MGMT_STATUS_INVALID_PARAMS;
3476 else
3477 status = MGMT_STATUS_SUCCESS;
3478
3479 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3480 status, &cp->addr, sizeof(cp->addr));
3481
3482 hci_dev_unlock(hdev);
3483 return err;
3484 }
3485
/* Fail the pending START_DISCOVERY command with the given HCI status
 * and reset the discovery state machine. Returns -ENOENT when no
 * START_DISCOVERY command is pending.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	/* The reply echoes the requested discovery type back */
	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3506
/* HCI request callback for start_discovery(): on success, move the
 * discovery state machine to FINDING and, for LE-based discovery,
 * schedule the delayed work that stops LE scanning after the timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry stops on its own; no LE scan timeout */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout stays 0 for BR/EDR and unknown types */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3545
/* Handle the MGMT Start Discovery command.
 *
 * Validates the adapter state and the requested discovery type, builds
 * the HCI request needed to start BR/EDR inquiry and/or LE active
 * scanning, and moves the discovery state machine to DISCOVERY_STARTING
 * once the request has been queued. The mgmt response itself is sent
 * from start_discovery_complete() when the HCI request finishes.
 *
 * Returns a negative errno or the result of cmd_status() on an early
 * failure path.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	/* All error paths below must undo mgmt_pending_add() via
	 * mgmt_pending_remove() before jumping to failed.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* LE scanning cannot be started while advertising */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3694
3695 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3696 {
3697 struct pending_cmd *cmd;
3698 int err;
3699
3700 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3701 if (!cmd)
3702 return -ENOENT;
3703
3704 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3705 &hdev->discovery.type, sizeof(hdev->discovery.type));
3706 mgmt_pending_remove(cmd);
3707
3708 return err;
3709 }
3710
3711 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3712 {
3713 BT_DBG("status %d", status);
3714
3715 hci_dev_lock(hdev);
3716
3717 if (status) {
3718 mgmt_stop_discovery_failed(hdev, status);
3719 goto unlock;
3720 }
3721
3722 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3723
3724 unlock:
3725 hci_dev_unlock(hdev);
3726 }
3727
3728 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3729 u16 len)
3730 {
3731 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3732 struct pending_cmd *cmd;
3733 struct hci_request req;
3734 int err;
3735
3736 BT_DBG("%s", hdev->name);
3737
3738 hci_dev_lock(hdev);
3739
3740 if (!hci_discovery_active(hdev)) {
3741 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3742 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3743 sizeof(mgmt_cp->type));
3744 goto unlock;
3745 }
3746
3747 if (hdev->discovery.type != mgmt_cp->type) {
3748 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3749 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3750 sizeof(mgmt_cp->type));
3751 goto unlock;
3752 }
3753
3754 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3755 if (!cmd) {
3756 err = -ENOMEM;
3757 goto unlock;
3758 }
3759
3760 hci_req_init(&req, hdev);
3761
3762 hci_stop_discovery(&req);
3763
3764 err = hci_req_run(&req, stop_discovery_complete);
3765 if (!err) {
3766 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3767 goto unlock;
3768 }
3769
3770 mgmt_pending_remove(cmd);
3771
3772 /* If no HCI commands were sent we're done */
3773 if (err == -ENODATA) {
3774 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3775 &mgmt_cp->type, sizeof(mgmt_cp->type));
3776 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3777 }
3778
3779 unlock:
3780 hci_dev_unlock(hdev);
3781 return err;
3782 }
3783
3784 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3785 u16 len)
3786 {
3787 struct mgmt_cp_confirm_name *cp = data;
3788 struct inquiry_entry *e;
3789 int err;
3790
3791 BT_DBG("%s", hdev->name);
3792
3793 hci_dev_lock(hdev);
3794
3795 if (!hci_discovery_active(hdev)) {
3796 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3797 MGMT_STATUS_FAILED, &cp->addr,
3798 sizeof(cp->addr));
3799 goto failed;
3800 }
3801
3802 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3803 if (!e) {
3804 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3805 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3806 sizeof(cp->addr));
3807 goto failed;
3808 }
3809
3810 if (cp->name_known) {
3811 e->name_state = NAME_KNOWN;
3812 list_del(&e->list);
3813 } else {
3814 e->name_state = NAME_NEEDED;
3815 hci_inquiry_cache_update_resolve(hdev, e);
3816 }
3817
3818 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3819 sizeof(cp->addr));
3820
3821 failed:
3822 hci_dev_unlock(hdev);
3823 return err;
3824 }
3825
3826 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3827 u16 len)
3828 {
3829 struct mgmt_cp_block_device *cp = data;
3830 u8 status;
3831 int err;
3832
3833 BT_DBG("%s", hdev->name);
3834
3835 if (!bdaddr_type_is_valid(cp->addr.type))
3836 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3837 MGMT_STATUS_INVALID_PARAMS,
3838 &cp->addr, sizeof(cp->addr));
3839
3840 hci_dev_lock(hdev);
3841
3842 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3843 if (err < 0) {
3844 status = MGMT_STATUS_FAILED;
3845 goto done;
3846 }
3847
3848 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3849 sk);
3850 status = MGMT_STATUS_SUCCESS;
3851
3852 done:
3853 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3854 &cp->addr, sizeof(cp->addr));
3855
3856 hci_dev_unlock(hdev);
3857
3858 return err;
3859 }
3860
3861 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3862 u16 len)
3863 {
3864 struct mgmt_cp_unblock_device *cp = data;
3865 u8 status;
3866 int err;
3867
3868 BT_DBG("%s", hdev->name);
3869
3870 if (!bdaddr_type_is_valid(cp->addr.type))
3871 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3872 MGMT_STATUS_INVALID_PARAMS,
3873 &cp->addr, sizeof(cp->addr));
3874
3875 hci_dev_lock(hdev);
3876
3877 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3878 if (err < 0) {
3879 status = MGMT_STATUS_INVALID_PARAMS;
3880 goto done;
3881 }
3882
3883 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3884 sk);
3885 status = MGMT_STATUS_SUCCESS;
3886
3887 done:
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3889 &cp->addr, sizeof(cp->addr));
3890
3891 hci_dev_unlock(hdev);
3892
3893 return err;
3894 }
3895
3896 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3897 u16 len)
3898 {
3899 struct mgmt_cp_set_device_id *cp = data;
3900 struct hci_request req;
3901 int err;
3902 __u16 source;
3903
3904 BT_DBG("%s", hdev->name);
3905
3906 source = __le16_to_cpu(cp->source);
3907
3908 if (source > 0x0002)
3909 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3910 MGMT_STATUS_INVALID_PARAMS);
3911
3912 hci_dev_lock(hdev);
3913
3914 hdev->devid_source = source;
3915 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3916 hdev->devid_product = __le16_to_cpu(cp->product);
3917 hdev->devid_version = __le16_to_cpu(cp->version);
3918
3919 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3920
3921 hci_req_init(&req, hdev);
3922 update_eir(&req);
3923 hci_req_run(&req, NULL);
3924
3925 hci_dev_unlock(hdev);
3926
3927 return err;
3928 }
3929
3930 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3931 {
3932 struct cmd_lookup match = { NULL, hdev };
3933
3934 if (status) {
3935 u8 mgmt_err = mgmt_status(status);
3936
3937 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3938 cmd_status_rsp, &mgmt_err);
3939 return;
3940 }
3941
3942 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3943 &match);
3944
3945 new_settings(hdev, match.sk);
3946
3947 if (match.sk)
3948 sock_put(match.sk);
3949 }
3950
/* Handle the MGMT Set Advertising command.
 *
 * When no HCI traffic is required (adapter off, value unchanged, or LE
 * connections exist) the HCI_ADVERTISING flag is toggled directly and
 * the response is sent immediately. Otherwise the enable/disable is
 * queued as an HCI request and the response comes from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only notify other sockets when the flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending Set Advertising or Set LE command is already
	 * driving the advertising state.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4028
4029 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4030 void *data, u16 len)
4031 {
4032 struct mgmt_cp_set_static_address *cp = data;
4033 int err;
4034
4035 BT_DBG("%s", hdev->name);
4036
4037 if (!lmp_le_capable(hdev))
4038 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4039 MGMT_STATUS_NOT_SUPPORTED);
4040
4041 if (hdev_is_powered(hdev))
4042 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4043 MGMT_STATUS_REJECTED);
4044
4045 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4046 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4047 return cmd_status(sk, hdev->id,
4048 MGMT_OP_SET_STATIC_ADDRESS,
4049 MGMT_STATUS_INVALID_PARAMS);
4050
4051 /* Two most significant bits shall be set */
4052 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4053 return cmd_status(sk, hdev->id,
4054 MGMT_OP_SET_STATIC_ADDRESS,
4055 MGMT_STATUS_INVALID_PARAMS);
4056 }
4057
4058 hci_dev_lock(hdev);
4059
4060 bacpy(&hdev->static_addr, &cp->bdaddr);
4061
4062 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4063
4064 hci_dev_unlock(hdev);
4065
4066 return err;
4067 }
4068
4069 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4070 void *data, u16 len)
4071 {
4072 struct mgmt_cp_set_scan_params *cp = data;
4073 __u16 interval, window;
4074 int err;
4075
4076 BT_DBG("%s", hdev->name);
4077
4078 if (!lmp_le_capable(hdev))
4079 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4080 MGMT_STATUS_NOT_SUPPORTED);
4081
4082 interval = __le16_to_cpu(cp->interval);
4083
4084 if (interval < 0x0004 || interval > 0x4000)
4085 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4086 MGMT_STATUS_INVALID_PARAMS);
4087
4088 window = __le16_to_cpu(cp->window);
4089
4090 if (window < 0x0004 || window > 0x4000)
4091 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4092 MGMT_STATUS_INVALID_PARAMS);
4093
4094 if (window > interval)
4095 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4096 MGMT_STATUS_INVALID_PARAMS);
4097
4098 hci_dev_lock(hdev);
4099
4100 hdev->le_scan_interval = interval;
4101 hdev->le_scan_window = window;
4102
4103 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4104
4105 /* If background scan is running, restart it so new parameters are
4106 * loaded.
4107 */
4108 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4109 hdev->discovery.state == DISCOVERY_STOPPED) {
4110 struct hci_request req;
4111
4112 hci_req_init(&req, hdev);
4113
4114 hci_req_add_le_scan_disable(&req);
4115 hci_req_add_le_passive_scan(&req);
4116
4117 hci_req_run(&req, NULL);
4118 }
4119
4120 hci_dev_unlock(hdev);
4121
4122 return err;
4123 }
4124
4125 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4126 {
4127 struct pending_cmd *cmd;
4128
4129 BT_DBG("status 0x%02x", status);
4130
4131 hci_dev_lock(hdev);
4132
4133 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4134 if (!cmd)
4135 goto unlock;
4136
4137 if (status) {
4138 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4139 mgmt_status(status));
4140 } else {
4141 struct mgmt_mode *cp = cmd->param;
4142
4143 if (cp->val)
4144 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4145 else
4146 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4147
4148 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4149 new_settings(hdev, cmd->sk);
4150 }
4151
4152 mgmt_pending_remove(cmd);
4153
4154 unlock:
4155 hci_dev_unlock(hdev);
4156 }
4157
/* Handle the MGMT Set Fast Connectable command.
 *
 * Queues a page scan parameter update; the HCI_FAST_CONNECTABLE flag is
 * only committed in fast_connectable_complete() once the controller has
 * accepted the new parameters.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Needs BR/EDR and a controller newer than Bluetooth 1.1 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4222
4223 static void set_bredr_scan(struct hci_request *req)
4224 {
4225 struct hci_dev *hdev = req->hdev;
4226 u8 scan = 0;
4227
4228 /* Ensure that fast connectable is disabled. This function will
4229 * not do anything if the page scan parameters are already what
4230 * they should be.
4231 */
4232 write_fast_connectable(req, false);
4233
4234 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4235 scan |= SCAN_PAGE;
4236 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4237 scan |= SCAN_INQUIRY;
4238
4239 if (scan)
4240 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4241 }
4242
4243 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4244 {
4245 struct pending_cmd *cmd;
4246
4247 BT_DBG("status 0x%02x", status);
4248
4249 hci_dev_lock(hdev);
4250
4251 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4252 if (!cmd)
4253 goto unlock;
4254
4255 if (status) {
4256 u8 mgmt_err = mgmt_status(status);
4257
4258 /* We need to restore the flag if related HCI commands
4259 * failed.
4260 */
4261 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4262
4263 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4264 } else {
4265 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4266 new_settings(hdev, cmd->sk);
4267 }
4268
4269 mgmt_pending_remove(cmd);
4270
4271 unlock:
4272 hci_dev_unlock(hdev);
4273 }
4274
/* Handle the MGMT Set BR/EDR command on a dual-mode controller.
 *
 * Powered off, the HCI_BREDR_ENABLED flag is toggled directly (and the
 * BR/EDR-only settings are cleared when disabling). Powered on, only
 * enabling is allowed and is carried out via an HCI request whose
 * result is handled in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the value is already in effect */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4364
/* Handle the MGMT Set Secure Connections command.
 *
 * Value 0x00 disables SC, 0x01 enables it, and 0x02 enables SC-only
 * mode. Powered off, only the flags are toggled; powered on, the
 * Write Secure Connections Host Support HCI command is sent and the
 * mgmt response is deferred to the command-complete handler.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Debug builds can force SC support even without controller
	 * capability via the HCI_FORCE_SC flag.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both the enabled and SC-only states already
	 * match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only is host state, so it can be updated right away */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4452
4453 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4454 void *data, u16 len)
4455 {
4456 struct mgmt_mode *cp = data;
4457 bool changed, use_changed;
4458 int err;
4459
4460 BT_DBG("request for %s", hdev->name);
4461
4462 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4463 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4464 MGMT_STATUS_INVALID_PARAMS);
4465
4466 hci_dev_lock(hdev);
4467
4468 if (cp->val)
4469 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4470 &hdev->dev_flags);
4471 else
4472 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4473 &hdev->dev_flags);
4474
4475 if (cp->val == 0x02)
4476 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4477 &hdev->dev_flags);
4478 else
4479 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4480 &hdev->dev_flags);
4481
4482 if (hdev_is_powered(hdev) && use_changed &&
4483 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4484 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4485 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4486 sizeof(mode), &mode);
4487 }
4488
4489 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4490 if (err < 0)
4491 goto unlock;
4492
4493 if (changed)
4494 err = new_settings(hdev, sk);
4495
4496 unlock:
4497 hci_dev_unlock(hdev);
4498 return err;
4499 }
4500
4501 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4502 u16 len)
4503 {
4504 struct mgmt_cp_set_privacy *cp = cp_data;
4505 bool changed;
4506 int err;
4507
4508 BT_DBG("request for %s", hdev->name);
4509
4510 if (!lmp_le_capable(hdev))
4511 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4512 MGMT_STATUS_NOT_SUPPORTED);
4513
4514 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4515 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4516 MGMT_STATUS_INVALID_PARAMS);
4517
4518 if (hdev_is_powered(hdev))
4519 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4520 MGMT_STATUS_REJECTED);
4521
4522 hci_dev_lock(hdev);
4523
4524 /* If user space supports this command it is also expected to
4525 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4526 */
4527 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4528
4529 if (cp->privacy) {
4530 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4531 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4532 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4533 } else {
4534 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4535 memset(hdev->irk, 0, sizeof(hdev->irk));
4536 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4537 }
4538
4539 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4540 if (err < 0)
4541 goto unlock;
4542
4543 if (changed)
4544 err = new_settings(hdev, sk);
4545
4546 unlock:
4547 hci_dev_unlock(hdev);
4548 return err;
4549 }
4550
4551 static bool irk_is_valid(struct mgmt_irk_info *irk)
4552 {
4553 switch (irk->addr.type) {
4554 case BDADDR_LE_PUBLIC:
4555 return true;
4556
4557 case BDADDR_LE_RANDOM:
4558 /* Two most significant bits shall be set */
4559 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4560 return false;
4561 return true;
4562 }
4563
4564 return false;
4565 }
4566
4567 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4568 u16 len)
4569 {
4570 struct mgmt_cp_load_irks *cp = cp_data;
4571 u16 irk_count, expected_len;
4572 int i, err;
4573
4574 BT_DBG("request for %s", hdev->name);
4575
4576 if (!lmp_le_capable(hdev))
4577 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4578 MGMT_STATUS_NOT_SUPPORTED);
4579
4580 irk_count = __le16_to_cpu(cp->irk_count);
4581
4582 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4583 if (expected_len != len) {
4584 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4585 expected_len, len);
4586 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4587 MGMT_STATUS_INVALID_PARAMS);
4588 }
4589
4590 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4591
4592 for (i = 0; i < irk_count; i++) {
4593 struct mgmt_irk_info *key = &cp->irks[i];
4594
4595 if (!irk_is_valid(key))
4596 return cmd_status(sk, hdev->id,
4597 MGMT_OP_LOAD_IRKS,
4598 MGMT_STATUS_INVALID_PARAMS);
4599 }
4600
4601 hci_dev_lock(hdev);
4602
4603 hci_smp_irks_clear(hdev);
4604
4605 for (i = 0; i < irk_count; i++) {
4606 struct mgmt_irk_info *irk = &cp->irks[i];
4607 u8 addr_type;
4608
4609 if (irk->addr.type == BDADDR_LE_PUBLIC)
4610 addr_type = ADDR_LE_DEV_PUBLIC;
4611 else
4612 addr_type = ADDR_LE_DEV_RANDOM;
4613
4614 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4615 BDADDR_ANY);
4616 }
4617
4618 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4619
4620 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4621
4622 hci_dev_unlock(hdev);
4623
4624 return err;
4625 }
4626
4627 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4628 {
4629 if (key->master != 0x00 && key->master != 0x01)
4630 return false;
4631
4632 switch (key->addr.type) {
4633 case BDADDR_LE_PUBLIC:
4634 return true;
4635
4636 case BDADDR_LE_RANDOM:
4637 /* Two most significant bits shall be set */
4638 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4639 return false;
4640 return true;
4641 }
4642
4643 return false;
4644 }
4645
4646 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4647 void *cp_data, u16 len)
4648 {
4649 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4650 u16 key_count, expected_len;
4651 int i, err;
4652
4653 BT_DBG("request for %s", hdev->name);
4654
4655 if (!lmp_le_capable(hdev))
4656 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4657 MGMT_STATUS_NOT_SUPPORTED);
4658
4659 key_count = __le16_to_cpu(cp->key_count);
4660
4661 expected_len = sizeof(*cp) + key_count *
4662 sizeof(struct mgmt_ltk_info);
4663 if (expected_len != len) {
4664 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4665 expected_len, len);
4666 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4667 MGMT_STATUS_INVALID_PARAMS);
4668 }
4669
4670 BT_DBG("%s key_count %u", hdev->name, key_count);
4671
4672 for (i = 0; i < key_count; i++) {
4673 struct mgmt_ltk_info *key = &cp->keys[i];
4674
4675 if (!ltk_is_valid(key))
4676 return cmd_status(sk, hdev->id,
4677 MGMT_OP_LOAD_LONG_TERM_KEYS,
4678 MGMT_STATUS_INVALID_PARAMS);
4679 }
4680
4681 hci_dev_lock(hdev);
4682
4683 hci_smp_ltks_clear(hdev);
4684
4685 for (i = 0; i < key_count; i++) {
4686 struct mgmt_ltk_info *key = &cp->keys[i];
4687 u8 type, addr_type, authenticated;
4688
4689 if (key->addr.type == BDADDR_LE_PUBLIC)
4690 addr_type = ADDR_LE_DEV_PUBLIC;
4691 else
4692 addr_type = ADDR_LE_DEV_RANDOM;
4693
4694 if (key->master)
4695 type = SMP_LTK;
4696 else
4697 type = SMP_LTK_SLAVE;
4698
4699 switch (key->type) {
4700 case MGMT_LTK_UNAUTHENTICATED:
4701 authenticated = 0x00;
4702 break;
4703 case MGMT_LTK_AUTHENTICATED:
4704 authenticated = 0x01;
4705 break;
4706 default:
4707 continue;
4708 }
4709
4710 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4711 authenticated, key->val, key->enc_size, key->ediv,
4712 key->rand);
4713 }
4714
4715 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4716 NULL, 0);
4717
4718 hci_dev_unlock(hdev);
4719
4720 return err;
4721 }
4722
/* Context handed to get_conn_info_complete() when iterating the pending
 * Get Connection Information commands for one connection.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the refresh was done for */
	bool valid_tx_power;	/* TX power values may be reported */
	u8 mgmt_status;		/* status to report to user space */
};
4728
/* mgmt_pending_foreach() callback: complete one pending Get Connection
 * Information command if it refers to the connection described by @data
 * (a struct cmd_conn_lookup). Commands for other connections are left
 * untouched.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Only fill in RSSI/TX power on success; on failure the zeroed
	 * reply carries just the address.
	 */
	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the connection reference held for this pending command
	 * (presumably taken when the command was queued — confirm in
	 * get_conn_info()).
	 */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4764
/* HCI request callback for the RSSI / TX power refresh issued for the
 * Get Connection Information command. Recovers the connection handle
 * from the last sent HCI command and completes every matching pending
 * mgmt command via get_conn_info_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4822
/* Handle the Get Connection Info mgmt command.  Returns cached
 * RSSI/TX-power values for the given connection when they are still
 * fresh; otherwise queues an HCI request to refresh them and defers the
 * reply to conn_info_refresh_complete()/get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The requested address is echoed in every reply, success or not */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always the first command in the request;
		 * conn_info_refresh_complete() relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Reference is dropped in get_conn_info_complete() */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4934
/* HCI request completion callback for get_clock_info().  Determines
 * from the last sent Read Clock command whether a piconet clock for a
 * specific connection was requested, looks up the matching pending
 * command and replies with the local (and optionally piconet) clock.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which == 0x01 means piconet clock of a specific connection;
	 * 0x00 means the local clock (handle is ignored then).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Release the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4987
4988 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
4989 u16 len)
4990 {
4991 struct mgmt_cp_get_clock_info *cp = data;
4992 struct mgmt_rp_get_clock_info rp;
4993 struct hci_cp_read_clock hci_cp;
4994 struct pending_cmd *cmd;
4995 struct hci_request req;
4996 struct hci_conn *conn;
4997 int err;
4998
4999 BT_DBG("%s", hdev->name);
5000
5001 memset(&rp, 0, sizeof(rp));
5002 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5003 rp.addr.type = cp->addr.type;
5004
5005 if (cp->addr.type != BDADDR_BREDR)
5006 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5007 MGMT_STATUS_INVALID_PARAMS,
5008 &rp, sizeof(rp));
5009
5010 hci_dev_lock(hdev);
5011
5012 if (!hdev_is_powered(hdev)) {
5013 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5014 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5015 goto unlock;
5016 }
5017
5018 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5019 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5020 &cp->addr.bdaddr);
5021 if (!conn || conn->state != BT_CONNECTED) {
5022 err = cmd_complete(sk, hdev->id,
5023 MGMT_OP_GET_CLOCK_INFO,
5024 MGMT_STATUS_NOT_CONNECTED,
5025 &rp, sizeof(rp));
5026 goto unlock;
5027 }
5028 } else {
5029 conn = NULL;
5030 }
5031
5032 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5033 if (!cmd) {
5034 err = -ENOMEM;
5035 goto unlock;
5036 }
5037
5038 hci_req_init(&req, hdev);
5039
5040 memset(&hci_cp, 0, sizeof(hci_cp));
5041 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5042
5043 if (conn) {
5044 hci_conn_hold(conn);
5045 cmd->user_data = conn;
5046
5047 hci_cp.handle = cpu_to_le16(conn->handle);
5048 hci_cp.which = 0x01; /* Piconet clock */
5049 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5050 }
5051
5052 err = hci_req_run(&req, get_clock_info_complete);
5053 if (err < 0)
5054 mgmt_pending_remove(cmd);
5055
5056 unlock:
5057 hci_dev_unlock(hdev);
5058 return err;
5059 }
5060
5061 static void device_added(struct sock *sk, struct hci_dev *hdev,
5062 bdaddr_t *bdaddr, u8 type, u8 action)
5063 {
5064 struct mgmt_ev_device_added ev;
5065
5066 bacpy(&ev.addr.bdaddr, bdaddr);
5067 ev.addr.type = type;
5068 ev.action = action;
5069
5070 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5071 }
5072
5073 static int add_device(struct sock *sk, struct hci_dev *hdev,
5074 void *data, u16 len)
5075 {
5076 struct mgmt_cp_add_device *cp = data;
5077 u8 auto_conn, addr_type;
5078 int err;
5079
5080 BT_DBG("%s", hdev->name);
5081
5082 if (!bdaddr_type_is_le(cp->addr.type) ||
5083 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5084 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5085 MGMT_STATUS_INVALID_PARAMS,
5086 &cp->addr, sizeof(cp->addr));
5087
5088 if (cp->action != 0x00 && cp->action != 0x01)
5089 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5090 MGMT_STATUS_INVALID_PARAMS,
5091 &cp->addr, sizeof(cp->addr));
5092
5093 hci_dev_lock(hdev);
5094
5095 if (cp->addr.type == BDADDR_LE_PUBLIC)
5096 addr_type = ADDR_LE_DEV_PUBLIC;
5097 else
5098 addr_type = ADDR_LE_DEV_RANDOM;
5099
5100 if (cp->action)
5101 auto_conn = HCI_AUTO_CONN_ALWAYS;
5102 else
5103 auto_conn = HCI_AUTO_CONN_REPORT;
5104
5105 /* If the connection parameters don't exist for this device,
5106 * they will be created and configured with defaults.
5107 */
5108 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5109 auto_conn) < 0) {
5110 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5111 MGMT_STATUS_FAILED,
5112 &cp->addr, sizeof(cp->addr));
5113 goto unlock;
5114 }
5115
5116 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5117
5118 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5119 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5120
5121 unlock:
5122 hci_dev_unlock(hdev);
5123 return err;
5124 }
5125
5126 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5127 bdaddr_t *bdaddr, u8 type)
5128 {
5129 struct mgmt_ev_device_removed ev;
5130
5131 bacpy(&ev.addr.bdaddr, bdaddr);
5132 ev.addr.type = type;
5133
5134 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5135 }
5136
/* Handle the Remove Device mgmt command.  A specific LE address removes
 * that device's connection parameters; BDADDR_ANY (with type 0) clears
 * all explicitly enabled entries at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that were never explicitly added (disabled)
		 * cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Cancel any pending auto-connection before freeing the
		 * parameter entry.
		 */
		hci_pend_le_conn_del(hdev, &cp->addr.bdaddr, addr_type);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY requires address type 0x00 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear_enabled(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5202
5203 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5204 u16 len)
5205 {
5206 struct mgmt_cp_load_conn_param *cp = data;
5207 u16 param_count, expected_len;
5208 int i;
5209
5210 if (!lmp_le_capable(hdev))
5211 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5212 MGMT_STATUS_NOT_SUPPORTED);
5213
5214 param_count = __le16_to_cpu(cp->param_count);
5215
5216 expected_len = sizeof(*cp) + param_count *
5217 sizeof(struct mgmt_conn_param);
5218 if (expected_len != len) {
5219 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5220 expected_len, len);
5221 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5222 MGMT_STATUS_INVALID_PARAMS);
5223 }
5224
5225 BT_DBG("%s param_count %u", hdev->name, param_count);
5226
5227 hci_dev_lock(hdev);
5228
5229 hci_conn_params_clear_disabled(hdev);
5230
5231 for (i = 0; i < param_count; i++) {
5232 struct mgmt_conn_param *param = &cp->params[i];
5233 struct hci_conn_params *hci_param;
5234 u16 min, max, latency, timeout;
5235 u8 addr_type;
5236
5237 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5238 param->addr.type);
5239
5240 if (param->addr.type == BDADDR_LE_PUBLIC) {
5241 addr_type = ADDR_LE_DEV_PUBLIC;
5242 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5243 addr_type = ADDR_LE_DEV_RANDOM;
5244 } else {
5245 BT_ERR("Ignoring invalid connection parameters");
5246 continue;
5247 }
5248
5249 min = le16_to_cpu(param->min_interval);
5250 max = le16_to_cpu(param->max_interval);
5251 latency = le16_to_cpu(param->latency);
5252 timeout = le16_to_cpu(param->timeout);
5253
5254 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5255 min, max, latency, timeout);
5256
5257 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5258 BT_ERR("Ignoring invalid connection parameters");
5259 continue;
5260 }
5261
5262 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5263 addr_type);
5264 if (!hci_param) {
5265 BT_ERR("Failed to add connection parameters");
5266 continue;
5267 }
5268
5269 hci_param->conn_min_interval = min;
5270 hci_param->conn_max_interval = max;
5271 hci_param->conn_latency = latency;
5272 hci_param->supervision_timeout = timeout;
5273 }
5274
5275 hci_dev_unlock(hdev);
5276
5277 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5278 }
5279
/* Command dispatch table, indexed by mgmt opcode.  For fixed-size
 * commands the payload must be exactly data_len bytes; var_len
 * commands accept data_len or more (variable trailing data).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;	/* true: data_len is a minimum, not exact size */
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
};
5342
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Copies the message into kernel memory, validates header, controller
 * index, opcode and parameter length, and dispatches to the matching
 * mgmt_handlers[] entry.  Returns the number of consumed bytes on
 * success or a negative error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, unconfigured or claimed by
		 * a user channel are not accessible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not carry a controller index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... while per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5443
5444 void mgmt_index_added(struct hci_dev *hdev)
5445 {
5446 if (hdev->dev_type != HCI_BREDR)
5447 return;
5448
5449 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5450 return;
5451
5452 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5453 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5454 else
5455 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5456 }
5457
5458 void mgmt_index_removed(struct hci_dev *hdev)
5459 {
5460 u8 status = MGMT_STATUS_INVALID_INDEX;
5461
5462 if (hdev->dev_type != HCI_BREDR)
5463 return;
5464
5465 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5466 return;
5467
5468 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5469
5470 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5471 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5472 else
5473 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5474 }
5475
5476 /* This function requires the caller holds hdev->lock */
5477 static void restart_le_auto_conns(struct hci_dev *hdev)
5478 {
5479 struct hci_conn_params *p;
5480 bool added = false;
5481
5482 list_for_each_entry(p, &hdev->le_conn_params, list) {
5483 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
5484 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
5485 added = true;
5486 }
5487 }
5488
5489 /* Calling hci_pend_le_conn_add will actually already trigger
5490 * background scanning when needed. So no need to trigger it
5491 * just another time.
5492 *
5493 * This check is here to avoid an unneeded restart of the
5494 * passive scanning. Since this is during the controller
5495 * power up phase the duplicate filtering is not an issue.
5496 */
5497 if (added)
5498 return;
5499
5500 hci_update_background_scan(hdev);
5501 }
5502
/* HCI request completion callback for powered_update_hci().  Restarts
 * LE auto-connections, answers all pending Set Powered commands and
 * emits a New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp keeps a reference on the last responded socket */
	if (match.sk)
		sock_put(match.sk);
}
5522
/* Build and run the HCI request that brings the controller in line with
 * the current mgmt settings after power on (SSP, LE host support,
 * advertising data, link security, scan mode, class, name and EIR).
 * Returns the result of hci_req_run(); powered_complete() is invoked
 * when the request finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication (link security) setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5582
/* Notify the mgmt layer about a power state change of @hdev.  On power
 * on, an HCI settings-sync request is queued and the pending commands
 * are answered from powered_complete(); if no request could be queued,
 * or on power off, pending commands are answered here directly.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; the deferred
		 * powered_complete() callback handles the replies.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: complete Set Powered commands and fail all others */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report the class of device reset if one was set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5617
5618 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5619 {
5620 struct pending_cmd *cmd;
5621 u8 status;
5622
5623 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5624 if (!cmd)
5625 return;
5626
5627 if (err == -ERFKILL)
5628 status = MGMT_STATUS_RFKILLED;
5629 else
5630 status = MGMT_STATUS_FAILED;
5631
5632 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5633
5634 mgmt_pending_remove(cmd);
5635 }
5636
/* Discoverable timeout expired: leave discoverable mode, restore plain
 * page scan (BR/EDR), refresh class/advertising data and notify mgmt
 * listeners about the settings change.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan only (drop inquiry scan) */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5667
5668 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5669 {
5670 bool changed;
5671
5672 /* Nothing needed here if there's a pending command since that
5673 * commands request completion callback takes care of everything
5674 * necessary.
5675 */
5676 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5677 return;
5678
5679 /* Powering off may clear the scan mode - don't let that interfere */
5680 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5681 return;
5682
5683 if (discoverable) {
5684 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5685 } else {
5686 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5687 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5688 }
5689
5690 if (changed) {
5691 struct hci_request req;
5692
5693 /* In case this change in discoverable was triggered by
5694 * a disabling of connectable there could be a need to
5695 * update the advertising flags.
5696 */
5697 hci_req_init(&req, hdev);
5698 update_adv_data(&req);
5699 hci_req_run(&req, NULL);
5700
5701 new_settings(hdev, NULL);
5702 }
5703 }
5704
5705 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5706 {
5707 bool changed;
5708
5709 /* Nothing needed here if there's a pending command since that
5710 * commands request completion callback takes care of everything
5711 * necessary.
5712 */
5713 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5714 return;
5715
5716 /* Powering off may clear the scan mode - don't let that interfere */
5717 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5718 return;
5719
5720 if (connectable)
5721 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5722 else
5723 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5724
5725 if (changed)
5726 new_settings(hdev, NULL);
5727 }
5728
5729 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5730 {
5731 /* Powering off may stop advertising - don't let that interfere */
5732 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5733 return;
5734
5735 if (advertising)
5736 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5737 else
5738 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5739 }
5740
5741 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5742 {
5743 u8 mgmt_err = mgmt_status(status);
5744
5745 if (scan & SCAN_PAGE)
5746 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5747 cmd_status_rsp, &mgmt_err);
5748
5749 if (scan & SCAN_INQUIRY)
5750 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5751 cmd_status_rsp, &mgmt_err);
5752 }
5753
5754 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5755 bool persistent)
5756 {
5757 struct mgmt_ev_new_link_key ev;
5758
5759 memset(&ev, 0, sizeof(ev));
5760
5761 ev.store_hint = persistent;
5762 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5763 ev.key.addr.type = BDADDR_BREDR;
5764 ev.key.type = key->type;
5765 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5766 ev.key.pin_len = key->pin_len;
5767
5768 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5769 }
5770
5771 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5772 {
5773 if (ltk->authenticated)
5774 return MGMT_LTK_AUTHENTICATED;
5775
5776 return MGMT_LTK_UNAUTHENTICATED;
5777 }
5778
5779 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5780 {
5781 struct mgmt_ev_new_long_term_key ev;
5782
5783 memset(&ev, 0, sizeof(ev));
5784
5785 /* Devices using resolvable or non-resolvable random addresses
5786 * without providing an indentity resolving key don't require
5787 * to store long term keys. Their addresses will change the
5788 * next time around.
5789 *
5790 * Only when a remote device provides an identity address
5791 * make sure the long term key is stored. If the remote
5792 * identity is known, the long term keys are internally
5793 * mapped to the identity address. So allow static random
5794 * and public addresses here.
5795 */
5796 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5797 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5798 ev.store_hint = 0x00;
5799 else
5800 ev.store_hint = persistent;
5801
5802 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5803 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5804 ev.key.type = mgmt_ltk_type(key);
5805 ev.key.enc_size = key->enc_size;
5806 ev.key.ediv = key->ediv;
5807 ev.key.rand = key->rand;
5808
5809 if (key->type == SMP_LTK)
5810 ev.key.master = 1;
5811
5812 memcpy(ev.key.val, key->val, sizeof(key->val));
5813
5814 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
5815 }
5816
5817 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5818 {
5819 struct mgmt_ev_new_irk ev;
5820
5821 memset(&ev, 0, sizeof(ev));
5822
5823 /* For identity resolving keys from devices that are already
5824 * using a public address or static random address, do not
5825 * ask for storing this key. The identity resolving key really
5826 * is only mandatory for devices using resovlable random
5827 * addresses.
5828 *
5829 * Storing all identity resolving keys has the downside that
5830 * they will be also loaded on next boot of they system. More
5831 * identity resolving keys, means more time during scanning is
5832 * needed to actually resolve these addresses.
5833 */
5834 if (bacmp(&irk->rpa, BDADDR_ANY))
5835 ev.store_hint = 0x01;
5836 else
5837 ev.store_hint = 0x00;
5838
5839 bacpy(&ev.rpa, &irk->rpa);
5840 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5841 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5842 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5843
5844 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5845 }
5846
/* Notify user space of a new Connection Signature Resolving Key.
 * store_hint tells user space whether persisting the key makes sense.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	/* A static random address has its two most significant bits set
	 * (0b11 in the top of the last octet), so any other random
	 * address is (non-)resolvable and not worth storing keys for.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5876
5877 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5878 u8 bdaddr_type, u8 store_hint, u16 min_interval,
5879 u16 max_interval, u16 latency, u16 timeout)
5880 {
5881 struct mgmt_ev_new_conn_param ev;
5882
5883 if (!hci_is_identity_address(bdaddr, bdaddr_type))
5884 return;
5885
5886 memset(&ev, 0, sizeof(ev));
5887 bacpy(&ev.addr.bdaddr, bdaddr);
5888 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5889 ev.store_hint = store_hint;
5890 ev.min_interval = cpu_to_le16(min_interval);
5891 ev.max_interval = cpu_to_le16(max_interval);
5892 ev.latency = cpu_to_le16(latency);
5893 ev.timeout = cpu_to_le16(timeout);
5894
5895 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
5896 }
5897
5898 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5899 u8 data_len)
5900 {
5901 eir[eir_len++] = sizeof(type) + data_len;
5902 eir[eir_len++] = type;
5903 memcpy(&eir[eir_len], data, data_len);
5904 eir_len += data_len;
5905
5906 return eir_len;
5907 }
5908
/* Notify user space that a device connected. An optional remote name
 * and class of device are packed as EIR fields after the fixed-size
 * event header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* name_len is a u8 (max 255) + 2 EIR header octets + 5 octets
	 * for the class field, so buf[512] cannot overflow here.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the header plus the EIR bytes actually used */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5935
5936 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5937 {
5938 struct mgmt_cp_disconnect *cp = cmd->param;
5939 struct sock **sk = data;
5940 struct mgmt_rp_disconnect rp;
5941
5942 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5943 rp.addr.type = cp->addr.type;
5944
5945 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5946 sizeof(rp));
5947
5948 *sk = cmd->sk;
5949 sock_hold(*sk);
5950
5951 mgmt_pending_remove(cmd);
5952 }
5953
/* mgmt_pending_foreach() callback: emit Device Unpaired (skipping the
 * issuer) and complete the pending Unpair Device command.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	/* memset also clears any struct padding before the reply goes
	 * out to user space
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5970
/* Handle a device disconnection: trigger a deferred power off if this
 * was the last connection, complete pending Disconnect/Unpair Device
 * commands, and emit the Device Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only connections that were reported to user space get a
	 * Device Disconnected event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuing socket (referenced) in sk
	 * so it can be excluded from the event broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6012
/* Handle a failed disconnect attempt: finish pending Unpair Device
 * commands and, when the pending Disconnect command targets this
 * address, complete it with the translated status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond when the pending command is for this address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6044
/* Report a failed connection attempt to user space, and trigger a
 * deferred power off if this was the last outstanding connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6070
6071 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6072 {
6073 struct mgmt_ev_pin_code_request ev;
6074
6075 bacpy(&ev.addr.bdaddr, bdaddr);
6076 ev.addr.type = BDADDR_BREDR;
6077 ev.secure = secure;
6078
6079 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6080 }
6081
6082 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6083 u8 status)
6084 {
6085 struct pending_cmd *cmd;
6086 struct mgmt_rp_pin_code_reply rp;
6087
6088 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6089 if (!cmd)
6090 return;
6091
6092 bacpy(&rp.addr.bdaddr, bdaddr);
6093 rp.addr.type = BDADDR_BREDR;
6094
6095 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6096 mgmt_status(status), &rp, sizeof(rp));
6097
6098 mgmt_pending_remove(cmd);
6099 }
6100
6101 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6102 u8 status)
6103 {
6104 struct pending_cmd *cmd;
6105 struct mgmt_rp_pin_code_reply rp;
6106
6107 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6108 if (!cmd)
6109 return;
6110
6111 bacpy(&rp.addr.bdaddr, bdaddr);
6112 rp.addr.type = BDADDR_BREDR;
6113
6114 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6115 mgmt_status(status), &rp, sizeof(rp));
6116
6117 mgmt_pending_remove(cmd);
6118 }
6119
6120 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6121 u8 link_type, u8 addr_type, u32 value,
6122 u8 confirm_hint)
6123 {
6124 struct mgmt_ev_user_confirm_request ev;
6125
6126 BT_DBG("%s", hdev->name);
6127
6128 bacpy(&ev.addr.bdaddr, bdaddr);
6129 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6130 ev.confirm_hint = confirm_hint;
6131 ev.value = cpu_to_le32(value);
6132
6133 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6134 NULL);
6135 }
6136
6137 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6138 u8 link_type, u8 addr_type)
6139 {
6140 struct mgmt_ev_user_passkey_request ev;
6141
6142 BT_DBG("%s", hdev->name);
6143
6144 bacpy(&ev.addr.bdaddr, bdaddr);
6145 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6146
6147 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6148 NULL);
6149 }
6150
6151 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6152 u8 link_type, u8 addr_type, u8 status,
6153 u8 opcode)
6154 {
6155 struct pending_cmd *cmd;
6156 struct mgmt_rp_user_confirm_reply rp;
6157 int err;
6158
6159 cmd = mgmt_pending_find(opcode, hdev);
6160 if (!cmd)
6161 return -ENOENT;
6162
6163 bacpy(&rp.addr.bdaddr, bdaddr);
6164 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6165 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6166 &rp, sizeof(rp));
6167
6168 mgmt_pending_remove(cmd);
6169
6170 return err;
6171 }
6172
/* User Confirm Reply finished in the controller. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6179
/* User Confirm Negative Reply finished in the controller. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6187
/* User Passkey Reply finished in the controller. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6194
/* User Passkey Negative Reply finished in the controller. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6202
6203 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6204 u8 link_type, u8 addr_type, u32 passkey,
6205 u8 entered)
6206 {
6207 struct mgmt_ev_passkey_notify ev;
6208
6209 BT_DBG("%s", hdev->name);
6210
6211 bacpy(&ev.addr.bdaddr, bdaddr);
6212 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6213 ev.passkey = __cpu_to_le32(passkey);
6214 ev.entered = entered;
6215
6216 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6217 }
6218
6219 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6220 u8 addr_type, u8 status)
6221 {
6222 struct mgmt_ev_auth_failed ev;
6223
6224 bacpy(&ev.addr.bdaddr, bdaddr);
6225 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6226 ev.status = mgmt_status(status);
6227
6228 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6229 }
6230
/* Controller finished enabling/disabling authentication: mirror the
 * result into the Link Security mgmt setting and complete pending
 * Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	/* On failure just report the error to every pending command */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH flag into HCI_LINK_SECURITY
	 * and note whether the setting actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6259
6260 static void clear_eir(struct hci_request *req)
6261 {
6262 struct hci_dev *hdev = req->hdev;
6263 struct hci_cp_write_eir cp;
6264
6265 if (!lmp_ext_inq_capable(hdev))
6266 return;
6267
6268 memset(hdev->eir, 0, sizeof(hdev->eir));
6269
6270 memset(&cp, 0, sizeof(cp));
6271
6272 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6273 }
6274
/* Controller finished enabling/disabling Secure Simple Pairing: sync
 * the SSP (and dependent High Speed) mgmt settings, complete pending
 * Set SSP commands, and update or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls the settings back; HS depends
		 * on SSP so it must be cleared too.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; report a change if
		 * either flag was previously set.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, refresh the EIR (and debug-key mode if in use);
	 * with SSP off, EIR data must be wiped.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6327
/* Controller finished enabling/disabling Secure Connections: sync the
 * SC (and SC-only) mgmt settings and complete pending Set Secure
 * Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls the settings back; SC-only mode
		 * cannot remain set without SC.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6364
6365 static void sk_lookup(struct pending_cmd *cmd, void *data)
6366 {
6367 struct cmd_lookup *match = data;
6368
6369 if (match->sk == NULL) {
6370 match->sk = cmd->sk;
6371 sock_hold(match->sk);
6372 }
6373 }
6374
/* Class of Device update finished: complete the pending commands that
 * can trigger it (Set Dev Class, Add/Remove UUID) and, on success,
 * broadcast the new class to user space.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* dev_class is the raw 3-byte class of device value */
	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6391
/* Local name write finished in the controller: update the cached name
 * and emit Local Name Changed, except during the power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command means the write was
		 * kernel-initiated; cache the name ourselves.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the issuing socket (if any) when broadcasting */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6418
/* Local OOB data read finished: reply to the pending Read Local OOB
 * Data command with either the extended (192 + 256 bit, when Secure
 * Connections is enabled and 256-bit data is present) or the legacy
 * (192 bit only) response format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6465
/* Report a discovered device to user space, resolving its RPA to the
 * identity address when an IRK is known, and packing EIR, class of
 * device and scan response data after the fixed event header.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !hdev->pend_le_reports)
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address when the RPA can be resolved */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR data doesn't
	 * already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6521
6522 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6523 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6524 {
6525 struct mgmt_ev_device_found *ev;
6526 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6527 u16 eir_len;
6528
6529 ev = (struct mgmt_ev_device_found *) buf;
6530
6531 memset(buf, 0, sizeof(buf));
6532
6533 bacpy(&ev->addr.bdaddr, bdaddr);
6534 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6535 ev->rssi = rssi;
6536
6537 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6538 name_len);
6539
6540 ev->eir_len = cpu_to_le16(eir_len);
6541
6542 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6543 }
6544
/* Discovery state changed: complete the pending Start/Stop Discovery
 * command (if any) and broadcast the Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	/* A state change to "discovering" completes Start Discovery,
	 * a change away from it completes Stop Discovery.
	 */
	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
6571
6572 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6573 {
6574 BT_DBG("%s status %u", hdev->name, status);
6575
6576 /* Clear the advertising mgmt setting if we failed to re-enable it */
6577 if (status) {
6578 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6579 new_settings(hdev, NULL);
6580 }
6581 }
6582
/* Re-enable advertising after the last LE connection went away,
 * provided the Advertising mgmt setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising is only restarted once no LE connection remains */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}