/*
 * Source: net/bluetooth/mgmt.c (mirror_ubuntu-bionic-kernel.git)
 * Commit: Bluetooth: Rename hci_conn_params_clear to hci_conn_params_clear_all
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management commands this implementation handles. The list is exported
 * to user space through the Read Management Supported Commands reply
 * (see read_commands()), so every handled opcode must appear here.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
};
92
/* Management events this implementation can emit. Like mgmt_commands[],
 * this list is reported to user space via read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
};
121
/* How long the service cache stays active after a change (2 seconds). */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* "Powered" from mgmt's point of view: the HCI interface is up and the
 * controller is not merely in the kernel's auto-power-on grace state.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
126
/* A mgmt command whose HCI transaction has not completed yet. */
struct pending_cmd {
	struct list_head list;	/* entry on hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'd copy of the command parameters */
	struct sock *sk;	/* originating mgmt socket; reference held */
	void *user_data;	/* opaque per-command context */
};
135
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so the entry order is
 * significant and must match the spec's status code numbering. Codes
 * past the end of the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
200
201 static u8 mgmt_status(u8 hci_status)
202 {
203 if (hci_status < ARRAY_SIZE(mgmt_status_table))
204 return mgmt_status_table[hci_status];
205
206 return MGMT_STATUS_FAILED;
207 }
208
209 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
210 {
211 struct sk_buff *skb;
212 struct mgmt_hdr *hdr;
213 struct mgmt_ev_cmd_status *ev;
214 int err;
215
216 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
217
218 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
219 if (!skb)
220 return -ENOMEM;
221
222 hdr = (void *) skb_put(skb, sizeof(*hdr));
223
224 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
225 hdr->index = cpu_to_le16(index);
226 hdr->len = cpu_to_le16(sizeof(*ev));
227
228 ev = (void *) skb_put(skb, sizeof(*ev));
229 ev->status = status;
230 ev->opcode = cpu_to_le16(cmd);
231
232 err = sock_queue_rcv_skb(sk, skb);
233 if (err < 0)
234 kfree_skb(skb);
235
236 return err;
237 }
238
239 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
240 void *rp, size_t rp_len)
241 {
242 struct sk_buff *skb;
243 struct mgmt_hdr *hdr;
244 struct mgmt_ev_cmd_complete *ev;
245 int err;
246
247 BT_DBG("sock %p", sk);
248
249 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
250 if (!skb)
251 return -ENOMEM;
252
253 hdr = (void *) skb_put(skb, sizeof(*hdr));
254
255 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
256 hdr->index = cpu_to_le16(index);
257 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
258
259 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
260 ev->opcode = cpu_to_le16(cmd);
261 ev->status = status;
262
263 if (rp)
264 memcpy(ev->data, rp, rp_len);
265
266 err = sock_queue_rcv_skb(sk, skb);
267 if (err < 0)
268 kfree_skb(skb);
269
270 return err;
271 }
272
273 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
274 u16 data_len)
275 {
276 struct mgmt_rp_read_version rp;
277
278 BT_DBG("sock %p", sk);
279
280 rp.version = MGMT_VERSION;
281 rp.revision = cpu_to_le16(MGMT_REVISION);
282
283 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
284 sizeof(rp));
285 }
286
287 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
288 u16 data_len)
289 {
290 struct mgmt_rp_read_commands *rp;
291 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
292 const u16 num_events = ARRAY_SIZE(mgmt_events);
293 __le16 *opcode;
294 size_t rp_size;
295 int i, err;
296
297 BT_DBG("sock %p", sk);
298
299 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
300
301 rp = kmalloc(rp_size, GFP_KERNEL);
302 if (!rp)
303 return -ENOMEM;
304
305 rp->num_commands = cpu_to_le16(num_commands);
306 rp->num_events = cpu_to_le16(num_events);
307
308 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
309 put_unaligned_le16(mgmt_commands[i], opcode);
310
311 for (i = 0; i < num_events; i++, opcode++)
312 put_unaligned_le16(mgmt_events[i], opcode);
313
314 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
315 rp_size);
316 kfree(rp);
317
318 return err;
319 }
320
/* Handle Read Controller Index List: reply with the ids of all BR/EDR
 * controllers that are visible to mgmt user space.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation. The filters applied
	 * in the second pass can only shrink the list.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping controllers that are
	 * still in setup, bound to a user channel, or raw devices not
	 * managed through mgmt.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller)
	 * count so no stale bytes are sent.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
376
377 static u32 get_supported_settings(struct hci_dev *hdev)
378 {
379 u32 settings = 0;
380
381 settings |= MGMT_SETTING_POWERED;
382 settings |= MGMT_SETTING_PAIRABLE;
383 settings |= MGMT_SETTING_DEBUG_KEYS;
384
385 if (lmp_bredr_capable(hdev)) {
386 settings |= MGMT_SETTING_CONNECTABLE;
387 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
388 settings |= MGMT_SETTING_FAST_CONNECTABLE;
389 settings |= MGMT_SETTING_DISCOVERABLE;
390 settings |= MGMT_SETTING_BREDR;
391 settings |= MGMT_SETTING_LINK_SECURITY;
392
393 if (lmp_ssp_capable(hdev)) {
394 settings |= MGMT_SETTING_SSP;
395 settings |= MGMT_SETTING_HS;
396 }
397
398 if (lmp_sc_capable(hdev) ||
399 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
400 settings |= MGMT_SETTING_SECURE_CONN;
401 }
402
403 if (lmp_le_capable(hdev)) {
404 settings |= MGMT_SETTING_LE;
405 settings |= MGMT_SETTING_ADVERTISING;
406 settings |= MGMT_SETTING_PRIVACY;
407 }
408
409 return settings;
410 }
411
412 static u32 get_current_settings(struct hci_dev *hdev)
413 {
414 u32 settings = 0;
415
416 if (hdev_is_powered(hdev))
417 settings |= MGMT_SETTING_POWERED;
418
419 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
420 settings |= MGMT_SETTING_CONNECTABLE;
421
422 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
423 settings |= MGMT_SETTING_FAST_CONNECTABLE;
424
425 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
426 settings |= MGMT_SETTING_DISCOVERABLE;
427
428 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
429 settings |= MGMT_SETTING_PAIRABLE;
430
431 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_BREDR;
433
434 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
435 settings |= MGMT_SETTING_LE;
436
437 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
438 settings |= MGMT_SETTING_LINK_SECURITY;
439
440 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
441 settings |= MGMT_SETTING_SSP;
442
443 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
444 settings |= MGMT_SETTING_HS;
445
446 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
447 settings |= MGMT_SETTING_ADVERTISING;
448
449 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
450 settings |= MGMT_SETTING_SECURE_CONN;
451
452 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
453 settings |= MGMT_SETTING_DEBUG_KEYS;
454
455 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
456 settings |= MGMT_SETTING_PRIVACY;
457
458 return settings;
459 }
460
461 #define PNP_INFO_SVCLASS_ID 0x1200
462
463 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
464 {
465 u8 *ptr = data, *uuids_start = NULL;
466 struct bt_uuid *uuid;
467
468 if (len < 4)
469 return ptr;
470
471 list_for_each_entry(uuid, &hdev->uuids, list) {
472 u16 uuid16;
473
474 if (uuid->size != 16)
475 continue;
476
477 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
478 if (uuid16 < 0x1100)
479 continue;
480
481 if (uuid16 == PNP_INFO_SVCLASS_ID)
482 continue;
483
484 if (!uuids_start) {
485 uuids_start = ptr;
486 uuids_start[0] = 1;
487 uuids_start[1] = EIR_UUID16_ALL;
488 ptr += 2;
489 }
490
491 /* Stop if not enough space to put next UUID */
492 if ((ptr - data) + sizeof(u16) > len) {
493 uuids_start[1] = EIR_UUID16_SOME;
494 break;
495 }
496
497 *ptr++ = (uuid16 & 0x00ff);
498 *ptr++ = (uuid16 & 0xff00) >> 8;
499 uuids_start[0] += sizeof(uuid16);
500 }
501
502 return ptr;
503 }
504
505 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
506 {
507 u8 *ptr = data, *uuids_start = NULL;
508 struct bt_uuid *uuid;
509
510 if (len < 6)
511 return ptr;
512
513 list_for_each_entry(uuid, &hdev->uuids, list) {
514 if (uuid->size != 32)
515 continue;
516
517 if (!uuids_start) {
518 uuids_start = ptr;
519 uuids_start[0] = 1;
520 uuids_start[1] = EIR_UUID32_ALL;
521 ptr += 2;
522 }
523
524 /* Stop if not enough space to put next UUID */
525 if ((ptr - data) + sizeof(u32) > len) {
526 uuids_start[1] = EIR_UUID32_SOME;
527 break;
528 }
529
530 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
531 ptr += sizeof(u32);
532 uuids_start[0] += sizeof(u32);
533 }
534
535 return ptr;
536 }
537
538 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
539 {
540 u8 *ptr = data, *uuids_start = NULL;
541 struct bt_uuid *uuid;
542
543 if (len < 18)
544 return ptr;
545
546 list_for_each_entry(uuid, &hdev->uuids, list) {
547 if (uuid->size != 128)
548 continue;
549
550 if (!uuids_start) {
551 uuids_start = ptr;
552 uuids_start[0] = 1;
553 uuids_start[1] = EIR_UUID128_ALL;
554 ptr += 2;
555 }
556
557 /* Stop if not enough space to put next UUID */
558 if ((ptr - data) + 16 > len) {
559 uuids_start[1] = EIR_UUID128_SOME;
560 break;
561 }
562
563 memcpy(ptr, uuid->uuid, 16);
564 ptr += 16;
565 uuids_start[0] += 16;
566 }
567
568 return ptr;
569 }
570
571 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
572 {
573 struct pending_cmd *cmd;
574
575 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
576 if (cmd->opcode == opcode)
577 return cmd;
578 }
579
580 return NULL;
581 }
582
583 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
584 struct hci_dev *hdev,
585 const void *data)
586 {
587 struct pending_cmd *cmd;
588
589 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
590 if (cmd->user_data != data)
591 continue;
592 if (cmd->opcode == opcode)
593 return cmd;
594 }
595
596 return NULL;
597 }
598
599 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
600 {
601 u8 ad_len = 0;
602 size_t name_len;
603
604 name_len = strlen(hdev->dev_name);
605 if (name_len > 0) {
606 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
607
608 if (name_len > max_len) {
609 name_len = max_len;
610 ptr[1] = EIR_NAME_SHORT;
611 } else
612 ptr[1] = EIR_NAME_COMPLETE;
613
614 ptr[0] = name_len + 1;
615
616 memcpy(ptr + 2, hdev->dev_name, name_len);
617
618 ad_len += (name_len + 2);
619 ptr += (name_len + 2);
620 }
621
622 return ad_len;
623 }
624
625 static void update_scan_rsp_data(struct hci_request *req)
626 {
627 struct hci_dev *hdev = req->hdev;
628 struct hci_cp_le_set_scan_rsp_data cp;
629 u8 len;
630
631 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
632 return;
633
634 memset(&cp, 0, sizeof(cp));
635
636 len = create_scan_rsp_data(hdev, cp.data);
637
638 if (hdev->scan_rsp_data_len == len &&
639 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
640 return;
641
642 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
643 hdev->scan_rsp_data_len = len;
644
645 cp.length = len;
646
647 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
648 }
649
650 static u8 get_adv_discov_flags(struct hci_dev *hdev)
651 {
652 struct pending_cmd *cmd;
653
654 /* If there's a pending mgmt command the flags will not yet have
655 * their final values, so check for this first.
656 */
657 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
658 if (cmd) {
659 struct mgmt_mode *cp = cmd->param;
660 if (cp->val == 0x01)
661 return LE_AD_GENERAL;
662 else if (cp->val == 0x02)
663 return LE_AD_LIMITED;
664 } else {
665 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
666 return LE_AD_LIMITED;
667 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
668 return LE_AD_GENERAL;
669 }
670
671 return 0;
672 }
673
674 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
675 {
676 u8 ad_len = 0, flags = 0;
677
678 flags |= get_adv_discov_flags(hdev);
679
680 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
681 flags |= LE_AD_NO_BREDR;
682
683 if (flags) {
684 BT_DBG("adv flags 0x%02x", flags);
685
686 ptr[0] = 2;
687 ptr[1] = EIR_FLAGS;
688 ptr[2] = flags;
689
690 ad_len += 3;
691 ptr += 3;
692 }
693
694 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
695 ptr[0] = 2;
696 ptr[1] = EIR_TX_POWER;
697 ptr[2] = (u8) hdev->adv_tx_power;
698
699 ad_len += 3;
700 ptr += 3;
701 }
702
703 return ad_len;
704 }
705
706 static void update_adv_data(struct hci_request *req)
707 {
708 struct hci_dev *hdev = req->hdev;
709 struct hci_cp_le_set_adv_data cp;
710 u8 len;
711
712 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
713 return;
714
715 memset(&cp, 0, sizeof(cp));
716
717 len = create_adv_data(hdev, cp.data);
718
719 if (hdev->adv_data_len == len &&
720 memcmp(cp.data, hdev->adv_data, len) == 0)
721 return;
722
723 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
724 hdev->adv_data_len = len;
725
726 cp.length = len;
727
728 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
729 }
730
731 static void create_eir(struct hci_dev *hdev, u8 *data)
732 {
733 u8 *ptr = data;
734 size_t name_len;
735
736 name_len = strlen(hdev->dev_name);
737
738 if (name_len > 0) {
739 /* EIR Data type */
740 if (name_len > 48) {
741 name_len = 48;
742 ptr[1] = EIR_NAME_SHORT;
743 } else
744 ptr[1] = EIR_NAME_COMPLETE;
745
746 /* EIR Data length */
747 ptr[0] = name_len + 1;
748
749 memcpy(ptr + 2, hdev->dev_name, name_len);
750
751 ptr += (name_len + 2);
752 }
753
754 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
755 ptr[0] = 2;
756 ptr[1] = EIR_TX_POWER;
757 ptr[2] = (u8) hdev->inq_tx_power;
758
759 ptr += 3;
760 }
761
762 if (hdev->devid_source > 0) {
763 ptr[0] = 9;
764 ptr[1] = EIR_DEVICE_ID;
765
766 put_unaligned_le16(hdev->devid_source, ptr + 2);
767 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
768 put_unaligned_le16(hdev->devid_product, ptr + 6);
769 put_unaligned_le16(hdev->devid_version, ptr + 8);
770
771 ptr += 10;
772 }
773
774 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
775 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
776 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
777 }
778
779 static void update_eir(struct hci_request *req)
780 {
781 struct hci_dev *hdev = req->hdev;
782 struct hci_cp_write_eir cp;
783
784 if (!hdev_is_powered(hdev))
785 return;
786
787 if (!lmp_ext_inq_capable(hdev))
788 return;
789
790 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
791 return;
792
793 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
794 return;
795
796 memset(&cp, 0, sizeof(cp));
797
798 create_eir(hdev, cp.data);
799
800 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
801 return;
802
803 memcpy(hdev->eir, cp.data, sizeof(cp.data));
804
805 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
806 }
807
808 static u8 get_service_classes(struct hci_dev *hdev)
809 {
810 struct bt_uuid *uuid;
811 u8 val = 0;
812
813 list_for_each_entry(uuid, &hdev->uuids, list)
814 val |= uuid->svc_hint;
815
816 return val;
817 }
818
819 static void update_class(struct hci_request *req)
820 {
821 struct hci_dev *hdev = req->hdev;
822 u8 cod[3];
823
824 BT_DBG("%s", hdev->name);
825
826 if (!hdev_is_powered(hdev))
827 return;
828
829 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
830 return;
831
832 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
833 return;
834
835 cod[0] = hdev->minor_class;
836 cod[1] = hdev->major_class;
837 cod[2] = get_service_classes(hdev);
838
839 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
840 cod[1] |= 0x20;
841
842 if (memcmp(cod, hdev->dev_class, 3) == 0)
843 return;
844
845 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
846 }
847
848 static bool get_connectable(struct hci_dev *hdev)
849 {
850 struct pending_cmd *cmd;
851
852 /* If there's a pending mgmt command the flag will not yet have
853 * it's final value, so check for this first.
854 */
855 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
856 if (cmd) {
857 struct mgmt_mode *cp = cmd->param;
858 return cp->val;
859 }
860
861 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
862 }
863
/* Queue the HCI commands that (re)program the advertising parameters
 * and turn LE advertising on.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 advertising interval units. */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
898
/* Queue an LE Set Advertise Enable command that turns advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
905
/* Delayed work that expires the service cache: push out the EIR data
 * and device class updates that were held back while it was active.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act when the cache flag was actually set. */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
926
/* Delayed work that marks the resolvable private address as expired and,
 * when advertising, restarts it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing further to do unless we are advertising without any
	 * active LE connections.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
952
/* One-time per-controller mgmt initialization; the HCI_MGMT flag marks
 * that it has already been performed.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
968
/* Handle Read Controller Information: reply with address, version,
 * manufacturer, settings, class and names of the given controller.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Take the device lock so the snapshot below is consistent. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
998
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the command.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1005
1006 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1007 struct hci_dev *hdev, void *data,
1008 u16 len)
1009 {
1010 struct pending_cmd *cmd;
1011
1012 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1013 if (!cmd)
1014 return NULL;
1015
1016 cmd->opcode = opcode;
1017 cmd->index = hdev->id;
1018
1019 cmd->param = kmalloc(len, GFP_KERNEL);
1020 if (!cmd->param) {
1021 kfree(cmd);
1022 return NULL;
1023 }
1024
1025 if (data)
1026 memcpy(cmd->param, data, len);
1027
1028 cmd->sk = sk;
1029 sock_hold(sk);
1030
1031 list_add(&cmd->list, &hdev->mgmt_pending);
1032
1033 return cmd;
1034 }
1035
1036 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1037 void (*cb)(struct pending_cmd *cmd,
1038 void *data),
1039 void *data)
1040 {
1041 struct pending_cmd *cmd, *tmp;
1042
1043 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1044 if (opcode > 0 && cmd->opcode != opcode)
1045 continue;
1046
1047 cb(cmd, data);
1048 }
1049 }
1050
/* Unlink a pending command from hdev->mgmt_pending and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1056
1057 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1058 {
1059 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1060
1061 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1062 sizeof(settings));
1063 }
1064
1065 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1066 {
1067 BT_DBG("%s status 0x%02x", hdev->name, status);
1068
1069 if (hci_conn_count(hdev) == 0) {
1070 cancel_delayed_work(&hdev->power_off);
1071 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1072 }
1073 }
1074
/* Queue the HCI commands needed to stop whatever discovery activity is
 * in progress, based on the current discovery state.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a classic inquiry or an LE scan is running. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1111
/* Queue the HCI commands that bring the controller to a quiescent state
 * before powering off: disable scans and advertising, stop discovery,
 * and disconnect/cancel/reject every connection. Returns the result of
 * hci_req_run() (-ENODATA when nothing needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is enabled. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	/* Wind down every connection according to its current state. */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it. */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempt: cancel the connection create. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming request: reject it. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1164
/* Handle the Set Powered mgmt command: power the controller on or off,
 * tracking the operation through a pending command until the HCI side
 * completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse overlapping Set Powered operations. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Controller is in the auto-off grace period: powering on just
	 * means taking over the already-up device.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1230
1231 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1232 struct sock *skip_sk)
1233 {
1234 struct sk_buff *skb;
1235 struct mgmt_hdr *hdr;
1236
1237 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1238 if (!skb)
1239 return -ENOMEM;
1240
1241 hdr = (void *) skb_put(skb, sizeof(*hdr));
1242 hdr->opcode = cpu_to_le16(event);
1243 if (hdev)
1244 hdr->index = cpu_to_le16(hdev->id);
1245 else
1246 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1247 hdr->len = cpu_to_le16(data_len);
1248
1249 if (data)
1250 memcpy(skb_put(skb, data_len), data, data_len);
1251
1252 /* Time stamp */
1253 __net_timestamp(skb);
1254
1255 hci_send_to_control(skb, skip_sk);
1256 kfree_skb(skb);
1257
1258 return 0;
1259 }
1260
1261 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1262 {
1263 __le32 ev;
1264
1265 ev = cpu_to_le32(get_current_settings(hdev));
1266
1267 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1268 }
1269
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(): sk records the socket of one responded command (held
 * with sock_hold so the caller can use it as the New Settings skip
 * socket and then sock_put it); hdev and mgmt_status carry the device
 * and overall status for the iteration.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1275
1276 static void settings_rsp(struct pending_cmd *cmd, void *data)
1277 {
1278 struct cmd_lookup *match = data;
1279
1280 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1281
1282 list_del(&cmd->list);
1283
1284 if (match->sk == NULL) {
1285 match->sk = cmd->sk;
1286 sock_hold(match->sk);
1287 }
1288
1289 mgmt_pending_free(cmd);
1290 }
1291
1292 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1293 {
1294 u8 *status = data;
1295
1296 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1297 mgmt_pending_remove(cmd);
1298 }
1299
1300 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1301 {
1302 if (!lmp_bredr_capable(hdev))
1303 return MGMT_STATUS_NOT_SUPPORTED;
1304 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1305 return MGMT_STATUS_REJECTED;
1306 else
1307 return MGMT_STATUS_SUCCESS;
1308 }
1309
1310 static u8 mgmt_le_support(struct hci_dev *hdev)
1311 {
1312 if (!lmp_le_capable(hdev))
1313 return MGMT_STATUS_NOT_SUPPORTED;
1314 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1315 return MGMT_STATUS_REJECTED;
1316 else
1317 return MGMT_STATUS_SUCCESS;
1318 }
1319
/* Request-completion callback for the HCI transaction started by
 * set_discoverable(): syncs HCI_DISCOVERABLE with the result, arms the
 * discoverable auto-off timeout when one was requested, answers the
 * pending mgmt command and emits New Settings if the flag changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in set_discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1376
/* Handle the MGMT Set Discoverable command.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable; cp->timeout is the auto-off period in seconds (required
 * for limited mode, forbidden when disabling).
 *
 * When powered off only the setting flags are updated; otherwise an HCI
 * request is built (IAC LAP + scan enable for BR/EDR, advertising data
 * for LE-only controllers) and finished in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a powered controller to arm the timer */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to also be connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00; /* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33; /* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33; /* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1541
1542 static void write_fast_connectable(struct hci_request *req, bool enable)
1543 {
1544 struct hci_dev *hdev = req->hdev;
1545 struct hci_cp_write_page_scan_activity acp;
1546 u8 type;
1547
1548 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1549 return;
1550
1551 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1552 return;
1553
1554 if (enable) {
1555 type = PAGE_SCAN_TYPE_INTERLACED;
1556
1557 /* 160 msec page scan interval */
1558 acp.interval = cpu_to_le16(0x0100);
1559 } else {
1560 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1561
1562 /* default 1.28 sec page scan */
1563 acp.interval = cpu_to_le16(0x0800);
1564 }
1565
1566 acp.window = cpu_to_le16(0x0012);
1567
1568 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1569 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1570 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1571 sizeof(acp), &acp);
1572
1573 if (hdev->page_scan_type != type)
1574 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1575 }
1576
/* Request-completion callback for set_connectable(): syncs the
 * HCI_CONNECTABLE flag with the command result, answers the pending
 * mgmt command and emits New Settings when the flag changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1614
1615 static int set_connectable_update_settings(struct hci_dev *hdev,
1616 struct sock *sk, u8 val)
1617 {
1618 bool changed = false;
1619 int err;
1620
1621 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1622 changed = true;
1623
1624 if (val) {
1625 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1626 } else {
1627 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1628 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1629 }
1630
1631 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1632 if (err < 0)
1633 return err;
1634
1635 if (changed)
1636 return new_settings(hdev, sk);
1637
1638 return 0;
1639 }
1640
/* Handle the MGMT Set Connectable command.
 *
 * When powered off only the setting flags are updated (via
 * set_connectable_update_settings()); otherwise page scanning and/or
 * LE advertising data are reprogrammed through an HCI request that
 * finishes in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Only touch scan enable when the requested state differs
		 * from the current page-scan state.
		 */
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable state,
	 * but only when no LE connection is using it.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing needed doing at the HCI level, so fall
		 * back to a pure settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1735
1736 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1737 u16 len)
1738 {
1739 struct mgmt_mode *cp = data;
1740 bool changed;
1741 int err;
1742
1743 BT_DBG("request for %s", hdev->name);
1744
1745 if (cp->val != 0x00 && cp->val != 0x01)
1746 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1747 MGMT_STATUS_INVALID_PARAMS);
1748
1749 hci_dev_lock(hdev);
1750
1751 if (cp->val)
1752 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1753 else
1754 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1755
1756 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1757 if (err < 0)
1758 goto unlock;
1759
1760 if (changed)
1761 err = new_settings(hdev, sk);
1762
1763 unlock:
1764 hci_dev_unlock(hdev);
1765 return err;
1766 }
1767
/* Handle the MGMT Set Link Security command (BR/EDR authentication).
 *
 * When powered off only the HCI_LINK_SECURITY flag is toggled; when
 * powered, HCI Write Auth Enable is sent and the response is delivered
 * asynchronously via the pending command added here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already has the requested auth-enable state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1837
/* Handle the MGMT Set Secure Simple Pairing command.
 *
 * When powered off only HCI_SSP_ENABLED (and, on disable, the dependent
 * HCI_HS_ENABLED) flags are updated; when powered, HCI Write SSP Mode
 * is sent and completion is reported via the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also takes High Speed down with it */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP while debug keys are in use also turns SSP debug
	 * mode off (cp->val is 0x00 here).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1919
/* Handle the MGMT Set High Speed command: toggle the HCI_HS_ENABLED
 * flag. Requires SSP to be enabled; disabling HS is rejected while the
 * controller is powered. No HCI traffic is generated.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1970
/* Completion callback for the HCI transaction started by set_le():
 * answers all pending Set LE commands (with an error status on
 * failure), broadcasts New Settings, and refreshes the advertising and
 * scan response data when LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2008
/* Handle the MGMT Set Low Energy command.
 *
 * When powered off, or when the controller's LE host support already
 * matches the request, only the HCI_LE_ENABLED (and on disable,
 * HCI_ADVERTISING) flags are updated. Otherwise HCI Write LE Host
 * Supported is queued (preceded by disabling advertising when turning
 * LE off) and completed in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns advertising off */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2097
2098 /* This is a helper function to test for pending mgmt commands that can
2099 * cause CoD or EIR HCI commands. We can only allow one such pending
2100 * mgmt command at a time since otherwise we cannot easily track what
2101 * the current values are, will be, and based on that calculate if a new
2102 * HCI command needs to be sent and if yes with what value.
2103 */
2104 static bool pending_eir_or_class(struct hci_dev *hdev)
2105 {
2106 struct pending_cmd *cmd;
2107
2108 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2109 switch (cmd->opcode) {
2110 case MGMT_OP_ADD_UUID:
2111 case MGMT_OP_REMOVE_UUID:
2112 case MGMT_OP_SET_DEV_CLASS:
2113 case MGMT_OP_SET_POWERED:
2114 return true;
2115 }
2116 }
2117
2118 return false;
2119 }
2120
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. Bytes 0-11 are the fixed tail shared by all
 * SIG-assigned UUIDs; bytes 12-15 hold the variable 32-bit prefix that
 * get_uuid_size() inspects.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2125
2126 static u8 get_uuid_size(const u8 *uuid)
2127 {
2128 u32 val;
2129
2130 if (memcmp(uuid, bluetooth_base_uuid, 12))
2131 return 128;
2132
2133 val = get_unaligned_le32(&uuid[12]);
2134 if (val > 0xffff)
2135 return 32;
2136
2137 return 16;
2138 }
2139
/* Shared completion helper for the UUID/device-class commands: answer
 * the pending @mgmt_op command with the mapped status and the current
 * three-byte device class, then drop the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2158
/* HCI request callback for add_uuid(): forward the status to the
 * generic class-command completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2165
/* Handle the MGMT Add UUID command: append the UUID to hdev->uuids and
 * queue class-of-device and EIR updates. Responds immediately when no
 * HCI commands were needed (-ENODATA), otherwise asynchronously via
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work was required; complete synchronously */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2223
2224 static bool enable_service_cache(struct hci_dev *hdev)
2225 {
2226 if (!hdev_is_powered(hdev))
2227 return false;
2228
2229 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2230 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2231 CACHE_TIMEOUT);
2232 return true;
2233 }
2234
2235 return false;
2236 }
2237
/* HCI request callback for remove_uuid(): forward the status to the
 * generic class-command completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2244
/* Handle the MGMT Remove UUID command: delete the matching UUID entries
 * (or all of them when the all-zero wildcard UUID is given) and queue
 * class-of-device and EIR updates. Responds synchronously when no HCI
 * work is needed, otherwise via remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard: drop every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service-cache timer got armed, the CoD/EIR
		 * update is deferred; complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was required; complete synchronously */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2322
/* HCI request callback for set_dev_class(): forward the status to the
 * generic class-command completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2329
/* Handle the MGMT Set Device Class command: store the new major/minor
 * class and, if powered, push Class-of-Device (and possibly EIR)
 * updates to the controller. The low two bits of minor and high three
 * bits of major are reserved and must be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache first so EIR reflects the real UUID
	 * list; the lock must be dropped while the work is cancelled
	 * synchronously.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was required; complete synchronously */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2400
2401 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2402 u16 len)
2403 {
2404 struct mgmt_cp_load_link_keys *cp = data;
2405 u16 key_count, expected_len;
2406 bool changed;
2407 int i;
2408
2409 BT_DBG("request for %s", hdev->name);
2410
2411 if (!lmp_bredr_capable(hdev))
2412 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2413 MGMT_STATUS_NOT_SUPPORTED);
2414
2415 key_count = __le16_to_cpu(cp->key_count);
2416
2417 expected_len = sizeof(*cp) + key_count *
2418 sizeof(struct mgmt_link_key_info);
2419 if (expected_len != len) {
2420 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2421 expected_len, len);
2422 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2423 MGMT_STATUS_INVALID_PARAMS);
2424 }
2425
2426 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2427 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2428 MGMT_STATUS_INVALID_PARAMS);
2429
2430 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2431 key_count);
2432
2433 for (i = 0; i < key_count; i++) {
2434 struct mgmt_link_key_info *key = &cp->keys[i];
2435
2436 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2437 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2438 MGMT_STATUS_INVALID_PARAMS);
2439 }
2440
2441 hci_dev_lock(hdev);
2442
2443 hci_link_keys_clear(hdev);
2444
2445 if (cp->debug_keys)
2446 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2447 &hdev->dev_flags);
2448 else
2449 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2450 &hdev->dev_flags);
2451
2452 if (changed)
2453 new_settings(hdev, NULL);
2454
2455 for (i = 0; i < key_count; i++) {
2456 struct mgmt_link_key_info *key = &cp->keys[i];
2457
2458 /* Always ignore debug keys and require a new pairing if
2459 * the user wants to use them.
2460 */
2461 if (key->type == HCI_LK_DEBUG_COMBINATION)
2462 continue;
2463
2464 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2465 key->type, key->pin_len, NULL);
2466 }
2467
2468 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2469
2470 hci_dev_unlock(hdev);
2471
2472 return 0;
2473 }
2474
2475 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2476 u8 addr_type, struct sock *skip_sk)
2477 {
2478 struct mgmt_ev_device_unpaired ev;
2479
2480 bacpy(&ev.addr.bdaddr, bdaddr);
2481 ev.addr.type = addr_type;
2482
2483 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2484 skip_sk);
2485 }
2486
2487 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2488 u16 len)
2489 {
2490 struct mgmt_cp_unpair_device *cp = data;
2491 struct mgmt_rp_unpair_device rp;
2492 struct hci_cp_disconnect dc;
2493 struct pending_cmd *cmd;
2494 struct hci_conn *conn;
2495 int err;
2496
2497 memset(&rp, 0, sizeof(rp));
2498 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2499 rp.addr.type = cp->addr.type;
2500
2501 if (!bdaddr_type_is_valid(cp->addr.type))
2502 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2503 MGMT_STATUS_INVALID_PARAMS,
2504 &rp, sizeof(rp));
2505
2506 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2507 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2508 MGMT_STATUS_INVALID_PARAMS,
2509 &rp, sizeof(rp));
2510
2511 hci_dev_lock(hdev);
2512
2513 if (!hdev_is_powered(hdev)) {
2514 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2515 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2516 goto unlock;
2517 }
2518
2519 if (cp->addr.type == BDADDR_BREDR) {
2520 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2521 } else {
2522 u8 addr_type;
2523
2524 if (cp->addr.type == BDADDR_LE_PUBLIC)
2525 addr_type = ADDR_LE_DEV_PUBLIC;
2526 else
2527 addr_type = ADDR_LE_DEV_RANDOM;
2528
2529 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2530
2531 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2532
2533 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2534 }
2535
2536 if (err < 0) {
2537 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2538 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2539 goto unlock;
2540 }
2541
2542 if (cp->disconnect) {
2543 if (cp->addr.type == BDADDR_BREDR)
2544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2545 &cp->addr.bdaddr);
2546 else
2547 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2548 &cp->addr.bdaddr);
2549 } else {
2550 conn = NULL;
2551 }
2552
2553 if (!conn) {
2554 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2555 &rp, sizeof(rp));
2556 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2557 goto unlock;
2558 }
2559
2560 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2561 sizeof(*cp));
2562 if (!cmd) {
2563 err = -ENOMEM;
2564 goto unlock;
2565 }
2566
2567 dc.handle = cpu_to_le16(conn->handle);
2568 dc.reason = 0x13; /* Remote User Terminated Connection */
2569 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2570 if (err < 0)
2571 mgmt_pending_remove(cmd);
2572
2573 unlock:
2574 hci_dev_unlock(hdev);
2575 return err;
2576 }
2577
/* Disconnect command handler (MGMT_OP_DISCONNECT).
 *
 * Looks up the BR/EDR (ACL) or LE connection for the given address
 * and issues an HCI Disconnect. The mgmt reply is deferred via a
 * pending command until the disconnect actually completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only a single Disconnect may be pending per adapter. */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to disconnect. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2642
2643 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2644 {
2645 switch (link_type) {
2646 case LE_LINK:
2647 switch (addr_type) {
2648 case ADDR_LE_DEV_PUBLIC:
2649 return BDADDR_LE_PUBLIC;
2650
2651 default:
2652 /* Fallback to LE Random address type */
2653 return BDADDR_LE_RANDOM;
2654 }
2655
2656 default:
2657 /* Fallback to BR/EDR type */
2658 return BDADDR_BREDR;
2659 }
2660 }
2661
/* Get Connections command handler (MGMT_OP_GET_CONNECTIONS).
 *
 * Replies with the addresses of all connections that mgmt considers
 * connected. SCO/eSCO links are excluded from the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply buffer. This
	 * count still includes SCO/eSCO links, so the allocation may be
	 * slightly larger than what is actually reported.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries, skipping SCO/eSCO. A slot
	 * written for a skipped link is simply overwritten or left
	 * outside the final count since i is not incremented for it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2719
/* Queue a PIN Code Negative Reply towards the controller and register
 * a pending mgmt command so the reply can be completed from the HCI
 * event handler. Caller must hold hdev->lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* The HCI command only carries the remote address. */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2738
/* PIN Code Reply command handler (MGMT_OP_PIN_CODE_REPLY).
 *
 * Forwards the user-supplied PIN to the controller for the pending
 * BR/EDR pairing. If high security was requested, only a full 16-byte
 * PIN is acceptable; anything shorter triggers a negative reply to
 * the controller and an INVALID_PARAMS status to the caller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN. */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject towards the controller, then report the bad
		 * parameters to the mgmt caller (unless the negative
		 * reply itself already failed).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2798
2799 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2800 u16 len)
2801 {
2802 struct mgmt_cp_set_io_capability *cp = data;
2803
2804 BT_DBG("");
2805
2806 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2807 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2808 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2809
2810 hci_dev_lock(hdev);
2811
2812 hdev->io_capability = cp->io_capability;
2813
2814 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2815 hdev->io_capability);
2816
2817 hci_dev_unlock(hdev);
2818
2819 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2820 0);
2821 }
2822
2823 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2824 {
2825 struct hci_dev *hdev = conn->hdev;
2826 struct pending_cmd *cmd;
2827
2828 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2829 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2830 continue;
2831
2832 if (cmd->user_data != conn)
2833 continue;
2834
2835 return cmd;
2836 }
2837
2838 return NULL;
2839 }
2840
/* Finish a Pair Device operation: send the reply, detach all pairing
 * callbacks from the connection, drop the reference taken when the
 * pairing started and remove the pending command.
 *
 * Statement order matters: the callbacks are cleared before
 * hci_conn_drop() so that tearing down the connection cannot
 * re-enter the pairing completion path.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2861
2862 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2863 {
2864 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2865 struct pending_cmd *cmd;
2866
2867 cmd = find_pairing(conn);
2868 if (cmd)
2869 pairing_complete(cmd, status);
2870 }
2871
2872 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2873 {
2874 struct pending_cmd *cmd;
2875
2876 BT_DBG("status %u", status);
2877
2878 cmd = find_pairing(conn);
2879 if (!cmd)
2880 BT_DBG("Unable to find a pending command");
2881 else
2882 pairing_complete(cmd, mgmt_status(status));
2883 }
2884
2885 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2886 {
2887 struct pending_cmd *cmd;
2888
2889 BT_DBG("status %u", status);
2890
2891 if (!status)
2892 return;
2893
2894 cmd = find_pairing(conn);
2895 if (!cmd)
2896 BT_DBG("Unable to find a pending command");
2897 else
2898 pairing_complete(cmd, mgmt_status(status));
2899 }
2900
/* Pair Device command handler (MGMT_OP_PAIR_DEVICE).
 *
 * Initiates a connection (ACL for BR/EDR, LE otherwise) with
 * dedicated-bonding authentication and registers callbacks that
 * complete the command once pairing finishes. The reply is deferred
 * via a pending command unless connecting fails outright.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect_cfm_cb already set means another pairing attempt is
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete right away. */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3018
/* Cancel Pair Device command handler (MGMT_OP_CANCEL_PAIR_DEVICE).
 *
 * Aborts the in-progress Pair Device operation for the given address,
 * completing it with MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* No pending pairing at all means there is nothing to cancel. */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The pending pairing must be for the requested address. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3060
/* Common helper for all user pairing responses (PIN negative reply,
 * user confirm reply/neg reply, passkey reply/neg reply).
 *
 * LE responses are handled entirely by the SMP layer and replied to
 * synchronously; BR/EDR responses are forwarded to the controller as
 * the given hci_op, with the mgmt reply deferred via a pending
 * command. @passkey is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE: hand the response to SMP and reply immediately. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3128
/* PIN Code Negative Reply handler: thin wrapper around
 * user_pairing_resp() with no passkey.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3140
/* User Confirm Reply handler: validates the fixed command size and
 * delegates to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3156
/* User Confirm Negative Reply handler: thin wrapper around
 * user_pairing_resp() with no passkey.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3168
/* User Passkey Reply handler: forwards the user-entered passkey via
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3180
/* User Passkey Negative Reply handler: thin wrapper around
 * user_pairing_resp() with no passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3192
/* Append a Write Local Name HCI command carrying hdev->dev_name to
 * the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3202
/* Completion callback for the HCI request queued by set_local_name().
 * Sends the final mgmt reply (command complete on success, command
 * status on failure) and removes the pending command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cleaned up elsewhere. */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3230
/* Set Local Name command handler (MGMT_OP_SET_LOCAL_NAME).
 *
 * Updates the device and short names. When powered, the new name is
 * pushed to the controller (local name + EIR for BR/EDR, scan
 * response data for LE) and the reply is deferred to
 * set_name_complete(); otherwise the names are just stored and a
 * Local Name Changed event is emitted immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3299
/* Read Local OOB Data command handler (MGMT_OP_READ_LOCAL_OOB_DATA).
 *
 * Asks the controller for its out-of-band pairing data; the extended
 * variant is used when Secure Connections is enabled. The mgmt reply
 * is deferred via a pending command to the HCI completion handler.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support. */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one request may be outstanding at a time. */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3347
/* Add Remote OOB Data command handler (MGMT_OP_ADD_REMOTE_OOB_DATA).
 *
 * Stores out-of-band pairing data received for a remote device. The
 * command comes in two sizes: the legacy form (192-bit hash and
 * randomizer only) and the extended form that additionally carries
 * the 256-bit Secure Connections values. Any other length is
 * rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3395
3396 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3397 void *data, u16 len)
3398 {
3399 struct mgmt_cp_remove_remote_oob_data *cp = data;
3400 u8 status;
3401 int err;
3402
3403 BT_DBG("%s", hdev->name);
3404
3405 hci_dev_lock(hdev);
3406
3407 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3408 if (err < 0)
3409 status = MGMT_STATUS_INVALID_PARAMS;
3410 else
3411 status = MGMT_STATUS_SUCCESS;
3412
3413 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3414 status, &cp->addr, sizeof(cp->addr));
3415
3416 hci_dev_unlock(hdev);
3417 return err;
3418 }
3419
/* Fail the pending Start Discovery command with the given HCI status
 * and reset the discovery state machine to stopped. Returns -ENOENT
 * when no Start Discovery command was pending.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	/* The reply carries the discovery type that was requested. */
	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3440
/* Completion callback for the HCI request built by start_discovery().
 *
 * On success, moves discovery to the FINDING state and, for LE and
 * interleaved discovery, schedules the delayed work that will stop
 * LE scanning after the discovery timeout. BR/EDR-only inquiry ends
 * on its own, so no timeout is armed for it.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3479
/* Start Discovery command handler (MGMT_OP_START_DISCOVERY).
 *
 * Builds and runs an HCI request for the requested discovery type:
 * a classic inquiry for BR/EDR, or an active LE scan (preceded by
 * stopping any background scan) for LE and interleaved discovery.
 * The mgmt reply is deferred to start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and discovery are mutually exclusive. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry already driven by HCI means we are busy. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery needs BR/EDR on top of LE. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is rejected. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3628
3629 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3630 {
3631 struct pending_cmd *cmd;
3632 int err;
3633
3634 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3635 if (!cmd)
3636 return -ENOENT;
3637
3638 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3639 &hdev->discovery.type, sizeof(hdev->discovery.type));
3640 mgmt_pending_remove(cmd);
3641
3642 return err;
3643 }
3644
3645 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3646 {
3647 BT_DBG("status %d", status);
3648
3649 hci_dev_lock(hdev);
3650
3651 if (status) {
3652 mgmt_stop_discovery_failed(hdev, status);
3653 goto unlock;
3654 }
3655
3656 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3657
3658 unlock:
3659 hci_dev_unlock(hdev);
3660 }
3661
/* Stop Discovery command handler.
 *
 * Validates that a discovery of the requested type is actually active
 * and then issues the HCI commands to stop it. The mgmt response is
 * normally sent from stop_discovery_complete(); if no HCI commands were
 * needed (hci_req_run() returns -ENODATA) the response is sent here
 * directly.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type to stop must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	/* Request failed to run; the callback will not fire, so drop the
	 * pending command here.
	 */
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3717
3718 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3719 u16 len)
3720 {
3721 struct mgmt_cp_confirm_name *cp = data;
3722 struct inquiry_entry *e;
3723 int err;
3724
3725 BT_DBG("%s", hdev->name);
3726
3727 hci_dev_lock(hdev);
3728
3729 if (!hci_discovery_active(hdev)) {
3730 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3731 MGMT_STATUS_FAILED, &cp->addr,
3732 sizeof(cp->addr));
3733 goto failed;
3734 }
3735
3736 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3737 if (!e) {
3738 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3739 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3740 sizeof(cp->addr));
3741 goto failed;
3742 }
3743
3744 if (cp->name_known) {
3745 e->name_state = NAME_KNOWN;
3746 list_del(&e->list);
3747 } else {
3748 e->name_state = NAME_NEEDED;
3749 hci_inquiry_cache_update_resolve(hdev, e);
3750 }
3751
3752 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3753 sizeof(cp->addr));
3754
3755 failed:
3756 hci_dev_unlock(hdev);
3757 return err;
3758 }
3759
3760 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3761 u16 len)
3762 {
3763 struct mgmt_cp_block_device *cp = data;
3764 u8 status;
3765 int err;
3766
3767 BT_DBG("%s", hdev->name);
3768
3769 if (!bdaddr_type_is_valid(cp->addr.type))
3770 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3771 MGMT_STATUS_INVALID_PARAMS,
3772 &cp->addr, sizeof(cp->addr));
3773
3774 hci_dev_lock(hdev);
3775
3776 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3777 if (err < 0) {
3778 status = MGMT_STATUS_FAILED;
3779 goto done;
3780 }
3781
3782 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3783 sk);
3784 status = MGMT_STATUS_SUCCESS;
3785
3786 done:
3787 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3788 &cp->addr, sizeof(cp->addr));
3789
3790 hci_dev_unlock(hdev);
3791
3792 return err;
3793 }
3794
3795 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3796 u16 len)
3797 {
3798 struct mgmt_cp_unblock_device *cp = data;
3799 u8 status;
3800 int err;
3801
3802 BT_DBG("%s", hdev->name);
3803
3804 if (!bdaddr_type_is_valid(cp->addr.type))
3805 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3806 MGMT_STATUS_INVALID_PARAMS,
3807 &cp->addr, sizeof(cp->addr));
3808
3809 hci_dev_lock(hdev);
3810
3811 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3812 if (err < 0) {
3813 status = MGMT_STATUS_INVALID_PARAMS;
3814 goto done;
3815 }
3816
3817 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3818 sk);
3819 status = MGMT_STATUS_SUCCESS;
3820
3821 done:
3822 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3823 &cp->addr, sizeof(cp->addr));
3824
3825 hci_dev_unlock(hdev);
3826
3827 return err;
3828 }
3829
3830 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3831 u16 len)
3832 {
3833 struct mgmt_cp_set_device_id *cp = data;
3834 struct hci_request req;
3835 int err;
3836 __u16 source;
3837
3838 BT_DBG("%s", hdev->name);
3839
3840 source = __le16_to_cpu(cp->source);
3841
3842 if (source > 0x0002)
3843 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3844 MGMT_STATUS_INVALID_PARAMS);
3845
3846 hci_dev_lock(hdev);
3847
3848 hdev->devid_source = source;
3849 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3850 hdev->devid_product = __le16_to_cpu(cp->product);
3851 hdev->devid_version = __le16_to_cpu(cp->version);
3852
3853 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3854
3855 hci_req_init(&req, hdev);
3856 update_eir(&req);
3857 hci_req_run(&req, NULL);
3858
3859 hci_dev_unlock(hdev);
3860
3861 return err;
3862 }
3863
/* HCI request callback for Set Advertising: on failure reply to all
 * pending SET_ADVERTISING commands with the translated error, otherwise
 * send the settings responses followed by a New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* Release the socket stored in match by the foreach callback —
	 * presumably settings_rsp took a reference; confirm there.
	 */
	if (match.sk)
		sock_put(match.sk);
}
3884
/* Set Advertising command handler.
 *
 * Toggles the HCI_ADVERTISING setting. When no HCI traffic is required
 * (powered off, value unchanged, or LE connections present) only the
 * flag and the mgmt responses are updated; otherwise an advertising
 * enable/disable request is sent and completion is reported from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE support */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if the flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another advertising or LE toggle is in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3962
3963 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3964 void *data, u16 len)
3965 {
3966 struct mgmt_cp_set_static_address *cp = data;
3967 int err;
3968
3969 BT_DBG("%s", hdev->name);
3970
3971 if (!lmp_le_capable(hdev))
3972 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3973 MGMT_STATUS_NOT_SUPPORTED);
3974
3975 if (hdev_is_powered(hdev))
3976 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3977 MGMT_STATUS_REJECTED);
3978
3979 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3980 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3981 return cmd_status(sk, hdev->id,
3982 MGMT_OP_SET_STATIC_ADDRESS,
3983 MGMT_STATUS_INVALID_PARAMS);
3984
3985 /* Two most significant bits shall be set */
3986 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3987 return cmd_status(sk, hdev->id,
3988 MGMT_OP_SET_STATIC_ADDRESS,
3989 MGMT_STATUS_INVALID_PARAMS);
3990 }
3991
3992 hci_dev_lock(hdev);
3993
3994 bacpy(&hdev->static_addr, &cp->bdaddr);
3995
3996 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3997
3998 hci_dev_unlock(hdev);
3999
4000 return err;
4001 }
4002
4003 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4004 void *data, u16 len)
4005 {
4006 struct mgmt_cp_set_scan_params *cp = data;
4007 __u16 interval, window;
4008 int err;
4009
4010 BT_DBG("%s", hdev->name);
4011
4012 if (!lmp_le_capable(hdev))
4013 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4014 MGMT_STATUS_NOT_SUPPORTED);
4015
4016 interval = __le16_to_cpu(cp->interval);
4017
4018 if (interval < 0x0004 || interval > 0x4000)
4019 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4020 MGMT_STATUS_INVALID_PARAMS);
4021
4022 window = __le16_to_cpu(cp->window);
4023
4024 if (window < 0x0004 || window > 0x4000)
4025 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4026 MGMT_STATUS_INVALID_PARAMS);
4027
4028 if (window > interval)
4029 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4030 MGMT_STATUS_INVALID_PARAMS);
4031
4032 hci_dev_lock(hdev);
4033
4034 hdev->le_scan_interval = interval;
4035 hdev->le_scan_window = window;
4036
4037 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4038
4039 /* If background scan is running, restart it so new parameters are
4040 * loaded.
4041 */
4042 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4043 hdev->discovery.state == DISCOVERY_STOPPED) {
4044 struct hci_request req;
4045
4046 hci_req_init(&req, hdev);
4047
4048 hci_req_add_le_scan_disable(&req);
4049 hci_req_add_le_passive_scan(&req);
4050
4051 hci_req_run(&req, NULL);
4052 }
4053
4054 hci_dev_unlock(hdev);
4055
4056 return err;
4057 }
4058
/* HCI request callback for Set Fast Connectable: report the result to
 * the pending mgmt command and, on success, mirror the requested mode
 * into HCI_FAST_CONNECTABLE.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stashed in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4091
/* Set Fast Connectable command handler.
 *
 * Requires BR/EDR to be enabled, controller version 1.2 or later, the
 * device to be powered, and connectable mode to be active. The page
 * scan parameters are updated via an HCI request; the mgmt response is
 * sent from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the flag already matches the request */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4156
4157 static void set_bredr_scan(struct hci_request *req)
4158 {
4159 struct hci_dev *hdev = req->hdev;
4160 u8 scan = 0;
4161
4162 /* Ensure that fast connectable is disabled. This function will
4163 * not do anything if the page scan parameters are already what
4164 * they should be.
4165 */
4166 write_fast_connectable(req, false);
4167
4168 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4169 scan |= SCAN_PAGE;
4170 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4171 scan |= SCAN_INQUIRY;
4172
4173 if (scan)
4174 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4175 }
4176
/* HCI request callback for Set BR/EDR: report the result to the pending
 * mgmt command. On failure the HCI_BREDR_ENABLED flag (set optimistically
 * in set_bredr()) is rolled back.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4208
/* Set BR/EDR command handler.
 *
 * Enables or disables BR/EDR support on a dual-mode controller. While
 * powered off the flags are simply toggled; while powered on only
 * enabling is allowed and an HCI request updates scan mode and
 * advertising data, with the response sent from set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE is enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4298
/* Set Secure Connections command handler.
 *
 * val: 0x00 = disable, 0x01 = enable, 0x02 = enable SC-only mode.
 * While powered off only the flags are toggled; while powered on the
 * Write Secure Connections Host Support HCI command is sent and the
 * response is completed elsewhere (the pending command is left queued).
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* SC requires controller support unless forced via debugfs */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Record the SC-only preference once the command was sent */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4386
/* Set Debug Keys command handler.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep stored debug keys,
 * 0x02 = additionally use debug keys for new pairings (also enables
 * SSP debug mode on the controller when powered and SSP is on).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means stored debug keys are kept */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4434
/* Set Privacy command handler.
 *
 * Stores (or clears) the local IRK and toggles the HCI_PRIVACY flag.
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark any cached RPA as expired */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4484
4485 static bool irk_is_valid(struct mgmt_irk_info *irk)
4486 {
4487 switch (irk->addr.type) {
4488 case BDADDR_LE_PUBLIC:
4489 return true;
4490
4491 case BDADDR_LE_RANDOM:
4492 /* Two most significant bits shall be set */
4493 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4494 return false;
4495 return true;
4496 }
4497
4498 return false;
4499 }
4500
4501 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4502 u16 len)
4503 {
4504 struct mgmt_cp_load_irks *cp = cp_data;
4505 u16 irk_count, expected_len;
4506 int i, err;
4507
4508 BT_DBG("request for %s", hdev->name);
4509
4510 if (!lmp_le_capable(hdev))
4511 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4512 MGMT_STATUS_NOT_SUPPORTED);
4513
4514 irk_count = __le16_to_cpu(cp->irk_count);
4515
4516 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4517 if (expected_len != len) {
4518 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4519 expected_len, len);
4520 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4521 MGMT_STATUS_INVALID_PARAMS);
4522 }
4523
4524 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4525
4526 for (i = 0; i < irk_count; i++) {
4527 struct mgmt_irk_info *key = &cp->irks[i];
4528
4529 if (!irk_is_valid(key))
4530 return cmd_status(sk, hdev->id,
4531 MGMT_OP_LOAD_IRKS,
4532 MGMT_STATUS_INVALID_PARAMS);
4533 }
4534
4535 hci_dev_lock(hdev);
4536
4537 hci_smp_irks_clear(hdev);
4538
4539 for (i = 0; i < irk_count; i++) {
4540 struct mgmt_irk_info *irk = &cp->irks[i];
4541 u8 addr_type;
4542
4543 if (irk->addr.type == BDADDR_LE_PUBLIC)
4544 addr_type = ADDR_LE_DEV_PUBLIC;
4545 else
4546 addr_type = ADDR_LE_DEV_RANDOM;
4547
4548 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4549 BDADDR_ANY);
4550 }
4551
4552 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4553
4554 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4555
4556 hci_dev_unlock(hdev);
4557
4558 return err;
4559 }
4560
4561 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4562 {
4563 if (key->master != 0x00 && key->master != 0x01)
4564 return false;
4565
4566 switch (key->addr.type) {
4567 case BDADDR_LE_PUBLIC:
4568 return true;
4569
4570 case BDADDR_LE_RANDOM:
4571 /* Two most significant bits shall be set */
4572 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4573 return false;
4574 return true;
4575 }
4576
4577 return false;
4578 }
4579
4580 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4581 void *cp_data, u16 len)
4582 {
4583 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4584 u16 key_count, expected_len;
4585 int i, err;
4586
4587 BT_DBG("request for %s", hdev->name);
4588
4589 if (!lmp_le_capable(hdev))
4590 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4591 MGMT_STATUS_NOT_SUPPORTED);
4592
4593 key_count = __le16_to_cpu(cp->key_count);
4594
4595 expected_len = sizeof(*cp) + key_count *
4596 sizeof(struct mgmt_ltk_info);
4597 if (expected_len != len) {
4598 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4599 expected_len, len);
4600 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4601 MGMT_STATUS_INVALID_PARAMS);
4602 }
4603
4604 BT_DBG("%s key_count %u", hdev->name, key_count);
4605
4606 for (i = 0; i < key_count; i++) {
4607 struct mgmt_ltk_info *key = &cp->keys[i];
4608
4609 if (!ltk_is_valid(key))
4610 return cmd_status(sk, hdev->id,
4611 MGMT_OP_LOAD_LONG_TERM_KEYS,
4612 MGMT_STATUS_INVALID_PARAMS);
4613 }
4614
4615 hci_dev_lock(hdev);
4616
4617 hci_smp_ltks_clear(hdev);
4618
4619 for (i = 0; i < key_count; i++) {
4620 struct mgmt_ltk_info *key = &cp->keys[i];
4621 u8 type, addr_type, authenticated;
4622
4623 if (key->addr.type == BDADDR_LE_PUBLIC)
4624 addr_type = ADDR_LE_DEV_PUBLIC;
4625 else
4626 addr_type = ADDR_LE_DEV_RANDOM;
4627
4628 if (key->master)
4629 type = SMP_LTK;
4630 else
4631 type = SMP_LTK_SLAVE;
4632
4633 switch (key->type) {
4634 case MGMT_LTK_UNAUTHENTICATED:
4635 authenticated = 0x00;
4636 break;
4637 case MGMT_LTK_AUTHENTICATED:
4638 authenticated = 0x01;
4639 break;
4640 default:
4641 continue;
4642 }
4643
4644 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4645 authenticated, key->val, key->enc_size, key->ediv,
4646 key->rand);
4647 }
4648
4649 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4650 NULL, 0);
4651
4652 hci_dev_unlock(hdev);
4653
4654 return err;
4655 }
4656
/* Context passed to get_conn_info_complete() when iterating the pending
 * MGMT_OP_GET_CONN_INFO commands after a RSSI/TX-power refresh.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the refresh was for */
	bool valid_tx_power;		/* TX power values in conn are usable */
	u8 mgmt_status;			/* status to report to user space */
};
4662
/* mgmt_pending_foreach() callback: complete a single pending
 * MGMT_OP_GET_CONN_INFO command whose connection matches the one the
 * refresh request was issued for.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only reply to commands for the refreshed connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power values are only reported when the refresh
		 * request marked them as valid.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken via hci_conn_hold() when the command
	 * was queued in get_conn_info().
	 */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4698
/* HCI request callback for the RSSI/TX-power refresh issued from
 * get_conn_info(): recover the connection handle from the last sent
 * command and complete the matching pending mgmt commands.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4756
/* Get Connection Information command handler.
 *
 * Replies with cached RSSI/TX-power values for an active connection if
 * the cache is fresh enough; otherwise queues HCI Read RSSI (and, when
 * needed, Read TX Power) requests and defers the mgmt reply to
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The address is echoed back in every response, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;	/* 0x00 = current TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;	/* 0x01 = maximum TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the completion callback
		 * has sent the reply; it drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4868
/* Request callback for Get Clock Information.
 *
 * Matches the completed HCI Read Clock request back to the pending mgmt
 * command (keyed by the hci_conn pointer, or NULL for the local clock),
 * fills in the clock values and sends the command reply.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* Non-zero "which" means a piconet clock of a specific
	 * connection was requested; resolve its handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with the address only; clock fields stay zero */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4921
/* Get Clock Information command handler.
 *
 * Always reads the local clock; additionally reads the piconet clock
 * of a connected BR/EDR device when a non-wildcard address is given.
 * The mgmt reply is sent from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address is echoed back in every response, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First command: local clock (handle 0, which 0x00) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the completion callback has
		 * replied; it drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4994
4995 static void device_added(struct sock *sk, struct hci_dev *hdev,
4996 bdaddr_t *bdaddr, u8 type, u8 action)
4997 {
4998 struct mgmt_ev_device_added ev;
4999
5000 bacpy(&ev.addr.bdaddr, bdaddr);
5001 ev.addr.type = type;
5002 ev.action = action;
5003
5004 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5005 }
5006
/* Add Device command handler.
 *
 * Registers an LE device for automatic connection handling: action
 * 0x00 only creates/keeps the connection parameters entry, action
 * 0x01 enables always-auto-connect.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only concrete LE addresses are accepted; BR/EDR types and the
	 * BDADDR_ANY wildcard are invalid parameters here.
	 */
	if (!bdaddr_type_is_le(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Translate the mgmt address type to the HCI LE address type */
	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_DISABLED;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5059
5060 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5061 bdaddr_t *bdaddr, u8 type)
5062 {
5063 struct mgmt_ev_device_removed ev;
5064
5065 bacpy(&ev.addr.bdaddr, bdaddr);
5066 ev.addr.type = type;
5067
5068 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5069 }
5070
/* Remove Device command handler.
 *
 * Removes the connection parameters of a single LE device, or of all
 * devices when BDADDR_ANY (with address type 0x00) is given.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		u8 addr_type;

		/* Only LE devices have stored connection parameters */
		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard removal requires address type 0x00 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear_all(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5117
/* Dispatch table for incoming mgmt commands, indexed by opcode.
 *
 * data_len is the expected size of the command parameters; for entries
 * with var_len set it is the minimum size (the parameters may be
 * longer), otherwise the size must match exactly (checked in
 * mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
};
5178
/* Entry point for mgmt commands written to an HCI control socket.
 *
 * Validates the mgmt header and parameter length, resolves the
 * controller index, dispatches to the handler table and returns the
 * number of consumed bytes on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must account for the whole payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, bound to a user channel or
		 * marked raw-only are not accessible over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) ||
		    test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below READ_INFO are global and must not carry a
	 * controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len entries treat data_len as a minimum, fixed-size
	 * entries require an exact match.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5272
5273 void mgmt_index_added(struct hci_dev *hdev)
5274 {
5275 if (hdev->dev_type != HCI_BREDR)
5276 return;
5277
5278 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5279 }
5280
5281 void mgmt_index_removed(struct hci_dev *hdev)
5282 {
5283 u8 status = MGMT_STATUS_INVALID_INDEX;
5284
5285 if (hdev->dev_type != HCI_BREDR)
5286 return;
5287
5288 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5289
5290 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5291 }
5292
5293 /* This function requires the caller holds hdev->lock */
static void restart_le_auto_conns(struct hci_dev *hdev)
{
	struct hci_conn_params *p;
	bool added = false;

	/* Re-queue a pending LE connection for every device configured
	 * to always auto-connect.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
			added = true;
		}
	}

	/* Calling hci_pend_le_conn_add will actually already trigger
	 * background scanning when needed. So no need to trigger it
	 * just another time.
	 *
	 * This check is here to avoid an unneeded restart of the
	 * passive scanning. Since this is during the controller
	 * power up phase the duplicate filtering is not an issue.
	 */
	if (added)
		return;

	hci_update_background_scan(hdev);
}
5319
/* Request callback run once the power-up HCI init sequence built in
 * powered_update_hci() has completed.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	/* Answer all pending Set Powered commands; match.sk ends up
	 * pointing at a socket that should be skipped by new_settings().
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference acquired during the iteration */
	if (match.sk)
		sock_put(match.sk);
}
5339
/* Build and run the HCI request that brings the controller's state in
 * line with the configured mgmt settings after power-up (SSP, LE host
 * support, advertising, link security, scan mode, class, name, EIR).
 * Returns the hci_req_run() result; powered_complete() runs at the end.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt has it enabled but the
	 * host support bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync authentication enable with the mgmt link security setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5399
/* Called when the controller's power state changes.
 *
 * On power-up the HCI init sequence is started (pending Set Powered
 * commands are then answered from powered_complete()); if no HCI work
 * is needed, or on power-down, pending commands are answered here and
 * the new settings are broadcast.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means HCI commands were queued; the reply is sent
		 * from powered_complete() once they finish.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering down: complete Set Powered commands and fail all
	 * other pending commands with "not powered".
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Announce a cleared class of device if one was set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5434
5435 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5436 {
5437 struct pending_cmd *cmd;
5438 u8 status;
5439
5440 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5441 if (!cmd)
5442 return;
5443
5444 if (err == -ERFKILL)
5445 status = MGMT_STATUS_RFKILLED;
5446 else
5447 status = MGMT_STATUS_FAILED;
5448
5449 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5450
5451 mgmt_pending_remove(cmd);
5452 }
5453
/* Called when the discoverable timeout expires: turns off discoverable
 * mode (scan mode, class of device and advertising data) and announces
 * the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop back to page-scan only (no inquiry scan) */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5484
/* Sync the HCI_DISCOVERABLE flag with the controller's reported scan
 * state and announce a settings change when the flag flipped.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable implies discoverable, so it is
		 * cleared together with the main flag.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5521
5522 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5523 {
5524 bool changed;
5525
5526 /* Nothing needed here if there's a pending command since that
5527 * commands request completion callback takes care of everything
5528 * necessary.
5529 */
5530 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5531 return;
5532
5533 /* Powering off may clear the scan mode - don't let that interfere */
5534 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5535 return;
5536
5537 if (connectable)
5538 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5539 else
5540 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5541
5542 if (changed)
5543 new_settings(hdev, NULL);
5544 }
5545
5546 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5547 {
5548 /* Powering off may stop advertising - don't let that interfere */
5549 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5550 return;
5551
5552 if (advertising)
5553 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5554 else
5555 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5556 }
5557
5558 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5559 {
5560 u8 mgmt_err = mgmt_status(status);
5561
5562 if (scan & SCAN_PAGE)
5563 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5564 cmd_status_rsp, &mgmt_err);
5565
5566 if (scan & SCAN_INQUIRY)
5567 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5568 cmd_status_rsp, &mgmt_err);
5569 }
5570
5571 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5572 bool persistent)
5573 {
5574 struct mgmt_ev_new_link_key ev;
5575
5576 memset(&ev, 0, sizeof(ev));
5577
5578 ev.store_hint = persistent;
5579 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5580 ev.key.addr.type = BDADDR_BREDR;
5581 ev.key.type = key->type;
5582 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5583 ev.key.pin_len = key->pin_len;
5584
5585 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5586 }
5587
5588 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5589 {
5590 if (ltk->authenticated)
5591 return MGMT_LTK_AUTHENTICATED;
5592
5593 return MGMT_LTK_UNAUTHENTICATED;
5594 }
5595
/* Send the New Long Term Key event so user space can (optionally)
 * persist a freshly distributed LE LTK.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5633
/* Send the New IRK event so user space can (optionally) persist a
 * freshly distributed identity resolving key.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5663
/* Send the New CSRK event so user space can (optionally) persist a
 * freshly distributed signature resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5693
5694 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5695 u8 bdaddr_type, u16 min_interval, u16 max_interval,
5696 u16 latency, u16 timeout)
5697 {
5698 struct mgmt_ev_new_conn_param ev;
5699
5700 memset(&ev, 0, sizeof(ev));
5701 bacpy(&ev.addr.bdaddr, bdaddr);
5702 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5703 ev.store_hint = 0x00;
5704 ev.min_interval = cpu_to_le16(min_interval);
5705 ev.max_interval = cpu_to_le16(max_interval);
5706 ev.latency = cpu_to_le16(latency);
5707 ev.timeout = cpu_to_le16(timeout);
5708
5709 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
5710 }
5711
5712 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5713 u8 data_len)
5714 {
5715 eir[eir_len++] = sizeof(type) + data_len;
5716 eir[eir_len++] = type;
5717 memcpy(&eir[eir_len], data, data_len);
5718 eir_len += data_len;
5719
5720 return eir_len;
5721 }
5722
/* Send the Device Connected event, appending the remote name and class
 * of device as EIR-encoded data when available.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* Fixed event header plus variable-length EIR data */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is actually set */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5749
/* mgmt_pending_foreach() helper: complete a pending Disconnect command
 * and hand its socket (with a reference held) back through @data so the
 * caller can skip it when broadcasting Device Disconnected.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller drops this reference after sending the event */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5767
/* mgmt_pending_foreach() helper: complete a pending Unpair Device
 * command after the device disconnected, emitting the Device Unpaired
 * event first. @data is the hci_dev the iteration runs on.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5784
/* Handle a remote device disconnecting: complete pending Disconnect
 * and Unpair Device commands and broadcast the Device Disconnected
 * event (skipping the socket that requested the disconnect).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only report devices that were announced as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores the requesting socket in sk with a
	 * reference held, so it can be excluded from the broadcast.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5826
/* Handle a failed HCI Disconnect: complete pending Unpair Device
 * commands and reply to the matching pending Disconnect command with
 * the translated error status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only answer the command that targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5858
/* Send the Connect Failed event after an outgoing connection attempt
 * was rejected or timed out.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5884
5885 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5886 {
5887 struct mgmt_ev_pin_code_request ev;
5888
5889 bacpy(&ev.addr.bdaddr, bdaddr);
5890 ev.addr.type = BDADDR_BREDR;
5891 ev.secure = secure;
5892
5893 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5894 }
5895
5896 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5897 u8 status)
5898 {
5899 struct pending_cmd *cmd;
5900 struct mgmt_rp_pin_code_reply rp;
5901
5902 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5903 if (!cmd)
5904 return;
5905
5906 bacpy(&rp.addr.bdaddr, bdaddr);
5907 rp.addr.type = BDADDR_BREDR;
5908
5909 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5910 mgmt_status(status), &rp, sizeof(rp));
5911
5912 mgmt_pending_remove(cmd);
5913 }
5914
5915 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5916 u8 status)
5917 {
5918 struct pending_cmd *cmd;
5919 struct mgmt_rp_pin_code_reply rp;
5920
5921 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5922 if (!cmd)
5923 return;
5924
5925 bacpy(&rp.addr.bdaddr, bdaddr);
5926 rp.addr.type = BDADDR_BREDR;
5927
5928 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5929 mgmt_status(status), &rp, sizeof(rp));
5930
5931 mgmt_pending_remove(cmd);
5932 }
5933
5934 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5935 u8 link_type, u8 addr_type, u32 value,
5936 u8 confirm_hint)
5937 {
5938 struct mgmt_ev_user_confirm_request ev;
5939
5940 BT_DBG("%s", hdev->name);
5941
5942 bacpy(&ev.addr.bdaddr, bdaddr);
5943 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5944 ev.confirm_hint = confirm_hint;
5945 ev.value = cpu_to_le32(value);
5946
5947 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5948 NULL);
5949 }
5950
5951 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5952 u8 link_type, u8 addr_type)
5953 {
5954 struct mgmt_ev_user_passkey_request ev;
5955
5956 BT_DBG("%s", hdev->name);
5957
5958 bacpy(&ev.addr.bdaddr, bdaddr);
5959 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5960
5961 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5962 NULL);
5963 }
5964
5965 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5966 u8 link_type, u8 addr_type, u8 status,
5967 u8 opcode)
5968 {
5969 struct pending_cmd *cmd;
5970 struct mgmt_rp_user_confirm_reply rp;
5971 int err;
5972
5973 cmd = mgmt_pending_find(opcode, hdev);
5974 if (!cmd)
5975 return -ENOENT;
5976
5977 bacpy(&rp.addr.bdaddr, bdaddr);
5978 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5979 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5980 &rp, sizeof(rp));
5981
5982 mgmt_pending_remove(cmd);
5983
5984 return err;
5985 }
5986
5987 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5988 u8 link_type, u8 addr_type, u8 status)
5989 {
5990 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5991 status, MGMT_OP_USER_CONFIRM_REPLY);
5992 }
5993
5994 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5995 u8 link_type, u8 addr_type, u8 status)
5996 {
5997 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5998 status,
5999 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6000 }
6001
6002 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6003 u8 link_type, u8 addr_type, u8 status)
6004 {
6005 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6006 status, MGMT_OP_USER_PASSKEY_REPLY);
6007 }
6008
6009 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6010 u8 link_type, u8 addr_type, u8 status)
6011 {
6012 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6013 status,
6014 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6015 }
6016
6017 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6018 u8 link_type, u8 addr_type, u32 passkey,
6019 u8 entered)
6020 {
6021 struct mgmt_ev_passkey_notify ev;
6022
6023 BT_DBG("%s", hdev->name);
6024
6025 bacpy(&ev.addr.bdaddr, bdaddr);
6026 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6027 ev.passkey = __cpu_to_le32(passkey);
6028 ev.entered = entered;
6029
6030 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6031 }
6032
6033 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6034 u8 addr_type, u8 status)
6035 {
6036 struct mgmt_ev_auth_failed ev;
6037
6038 bacpy(&ev.addr.bdaddr, bdaddr);
6039 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6040 ev.status = mgmt_status(status);
6041
6042 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6043 }
6044
/* Called when the HCI Write Authentication Enable command completed.
 * On failure, fail all pending SET_LINK_SECURITY commands; on success,
 * sync the HCI_LINK_SECURITY mgmt flag with the controller's HCI_AUTH
 * state and complete the pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag; the
	 * test_and_* return values tell us whether anything changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	/* Only broadcast new settings if the flag actually flipped */
	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6073
6074 static void clear_eir(struct hci_request *req)
6075 {
6076 struct hci_dev *hdev = req->hdev;
6077 struct hci_cp_write_eir cp;
6078
6079 if (!lmp_ext_inq_capable(hdev))
6080 return;
6081
6082 memset(hdev->eir, 0, sizeof(hdev->eir));
6083
6084 memset(&cp, 0, sizeof(cp));
6085
6086 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6087 }
6088
/* Called when the HCI Write Simple Pairing Mode command completed.
 * Keeps the HCI_SSP_ENABLED / HCI_HS_ENABLED mgmt flags in sync with
 * the requested @enable state, answers pending SET_SSP commands, and
 * queues the follow-up EIR and (optionally) SSP debug mode updates.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable that had already set the flag must roll
		 * it back (and HS with it) and tell user space.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; "changed" is
		 * true if either flag actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6141
/* Called when the HCI Write Secure Connections Support command
 * completed. Syncs the HCI_SC_ENABLED / HCI_SC_ONLY mgmt flags with
 * the requested @enable state and answers pending SET_SECURE_CONN
 * commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back a failed enable; SC-only mode cannot survive
		 * without SC itself.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6178
6179 static void sk_lookup(struct pending_cmd *cmd, void *data)
6180 {
6181 struct cmd_lookup *match = data;
6182
6183 if (match->sk == NULL) {
6184 match->sk = cmd->sk;
6185 sock_hold(match->sk);
6186 }
6187 }
6188
/* Called when the HCI Write Class of Device command completed. Picks
 * up the socket of whichever command triggered the write (the first
 * one found, in SET_DEV_CLASS → ADD_UUID → REMOVE_UUID order) so the
 * resulting event is not echoed back to it.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* dev_class is the 3-byte Class of Device value */
	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6205
/* Called when the HCI Write Local Name command completed. Emits
 * MGMT_EV_LOCAL_NAME_CHANGED unless the write was part of powering on
 * the controller, in which case no signal is sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No user-space command pending: cache the name locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originating socket (if any) when broadcasting */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6232
/* Called when reading the local OOB data finished. Answers the pending
 * READ_LOCAL_OOB_DATA command with either the extended (192+256 bit,
 * when Secure Connections is enabled and 256-bit data is available) or
 * the legacy (192-bit only) response format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended response: both 192- and 256-bit values */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy response: 192-bit values only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6279
/* Report a discovered remote device to user space. Resolves RPAs to
 * identity addresses via the IRK store, appends the class of device to
 * the EIR data when not already present, and concatenates scan
 * response data after the EIR data in a single DEVICE_FOUND event.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only interesting while discovery is running */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If an IRK matches, report the identity address instead of the
	 * (resolvable private) address seen over the air.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Inject the class of device unless the EIR data already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6327
/* Report a resolved remote name as a DEVICE_FOUND event whose EIR data
 * carries only an EIR_NAME_COMPLETE field. The +2 in the buffer size
 * accounts for the EIR field's length and type header bytes.
 * Assumes name_len <= HCI_MAX_NAME_LENGTH -- callers enforce this.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
6350
6351 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6352 {
6353 struct mgmt_ev_discovering ev;
6354 struct pending_cmd *cmd;
6355
6356 BT_DBG("%s discovering %u", hdev->name, discovering);
6357
6358 if (discovering)
6359 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6360 else
6361 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6362
6363 if (cmd != NULL) {
6364 u8 type = hdev->discovery.type;
6365
6366 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6367 sizeof(type));
6368 mgmt_pending_remove(cmd);
6369 }
6370
6371 memset(&ev, 0, sizeof(ev));
6372 ev.type = hdev->discovery.type;
6373 ev.discovering = discovering;
6374
6375 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6376 }
6377
6378 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6379 {
6380 BT_DBG("%s status %u", hdev->name, status);
6381
6382 /* Clear the advertising mgmt setting if we failed to re-enable it */
6383 if (status) {
6384 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6385 new_settings(hdev, NULL);
6386 }
6387 }
6388
6389 void mgmt_reenable_advertising(struct hci_dev *hdev)
6390 {
6391 struct hci_request req;
6392
6393 if (hci_conn_num(hdev, LE_LINK) > 0)
6394 return;
6395
6396 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6397 return;
6398
6399 hci_req_init(&req, hdev);
6400 enable_advertising(&req);
6401
6402 /* If this fails we have no option but to let user space know
6403 * that we've disabled advertising.
6404 */
6405 if (hci_req_run(&req, adv_enable_complete) < 0) {
6406 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6407 new_settings(hdev, NULL);
6408 }
6409 }