]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Add Load Connection Parameters command
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
/* Interface version/revision reported by the Read Management Version
 * command (see read_version() below).
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	7
39
/* Opcodes this interface accepts, advertised via Read Commands
 * (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
};
93
/* Events this interface may emit, advertised via Read Commands
 * (see read_commands() below).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
};
122
/* 2 second validity period; users of this constant are outside this
 * chunk — see references to CACHE_TIMEOUT elsewhere in the file.
 */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)

/* Powered from mgmt's point of view: HCI is up and not merely in the
 * auto-power-off grace period.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
		!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
127
/* Book-keeping for a management command that is awaiting completion.
 * Created by mgmt_pending_add(), destroyed by mgmt_pending_remove().
 */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode this entry waits for */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'ed copy of request parameters */
	struct sock *sk;	/* requesting socket; reference held */
	void *user_data;	/* opaque per-command context */
};
136
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status code.  Codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status() below.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
201
202 static u8 mgmt_status(u8 hci_status)
203 {
204 if (hci_status < ARRAY_SIZE(mgmt_status_table))
205 return mgmt_status_table[hci_status];
206
207 return MGMT_STATUS_FAILED;
208 }
209
/* Send a Command Status management event carrying @status for command
 * @cmd back to the socket the request came from.  Returns 0 on
 * success or a negative errno (e.g. -ENOMEM) on failure.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* skb not consumed on error; free it here */

	return err;
}
239
/* Send a Command Complete event for @cmd with @status plus an optional
 * return-parameter blob @rp of @rp_len bytes to the requesting socket.
 * Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	/* Return parameters are optional; @rp may be NULL with rp_len 0. */
	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* skb not consumed on error; free it here */

	return err;
}
273
274 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
275 u16 data_len)
276 {
277 struct mgmt_rp_read_version rp;
278
279 BT_DBG("sock %p", sk);
280
281 rp.version = MGMT_VERSION;
282 rp.revision = cpu_to_le16(MGMT_REVISION);
283
284 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
285 sizeof(rp));
286 }
287
288 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
289 u16 data_len)
290 {
291 struct mgmt_rp_read_commands *rp;
292 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
293 const u16 num_events = ARRAY_SIZE(mgmt_events);
294 __le16 *opcode;
295 size_t rp_size;
296 int i, err;
297
298 BT_DBG("sock %p", sk);
299
300 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
301
302 rp = kmalloc(rp_size, GFP_KERNEL);
303 if (!rp)
304 return -ENOMEM;
305
306 rp->num_commands = cpu_to_le16(num_commands);
307 rp->num_events = cpu_to_le16(num_events);
308
309 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
310 put_unaligned_le16(mgmt_commands[i], opcode);
311
312 for (i = 0; i < num_events; i++, opcode++)
313 put_unaligned_le16(mgmt_events[i], opcode);
314
315 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
316 rp_size);
317 kfree(rp);
318
319 return err;
320 }
321
322 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
323 u16 data_len)
324 {
325 struct mgmt_rp_read_index_list *rp;
326 struct hci_dev *d;
327 size_t rp_len;
328 u16 count;
329 int err;
330
331 BT_DBG("sock %p", sk);
332
333 read_lock(&hci_dev_list_lock);
334
335 count = 0;
336 list_for_each_entry(d, &hci_dev_list, list) {
337 if (d->dev_type == HCI_BREDR)
338 count++;
339 }
340
341 rp_len = sizeof(*rp) + (2 * count);
342 rp = kmalloc(rp_len, GFP_ATOMIC);
343 if (!rp) {
344 read_unlock(&hci_dev_list_lock);
345 return -ENOMEM;
346 }
347
348 count = 0;
349 list_for_each_entry(d, &hci_dev_list, list) {
350 if (test_bit(HCI_SETUP, &d->dev_flags))
351 continue;
352
353 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
354 continue;
355
356 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
357 continue;
358
359 if (d->dev_type == HCI_BREDR) {
360 rp->index[count++] = cpu_to_le16(d->id);
361 BT_DBG("Added hci%u", d->id);
362 }
363 }
364
365 rp->num_controllers = cpu_to_le16(count);
366 rp_len = sizeof(*rp) + (2 * count);
367
368 read_unlock(&hci_dev_list_lock);
369
370 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
371 rp_len);
372
373 kfree(rp);
374
375 return err;
376 }
377
378 static u32 get_supported_settings(struct hci_dev *hdev)
379 {
380 u32 settings = 0;
381
382 settings |= MGMT_SETTING_POWERED;
383 settings |= MGMT_SETTING_PAIRABLE;
384 settings |= MGMT_SETTING_DEBUG_KEYS;
385
386 if (lmp_bredr_capable(hdev)) {
387 settings |= MGMT_SETTING_CONNECTABLE;
388 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
389 settings |= MGMT_SETTING_FAST_CONNECTABLE;
390 settings |= MGMT_SETTING_DISCOVERABLE;
391 settings |= MGMT_SETTING_BREDR;
392 settings |= MGMT_SETTING_LINK_SECURITY;
393
394 if (lmp_ssp_capable(hdev)) {
395 settings |= MGMT_SETTING_SSP;
396 settings |= MGMT_SETTING_HS;
397 }
398
399 if (lmp_sc_capable(hdev) ||
400 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
401 settings |= MGMT_SETTING_SECURE_CONN;
402 }
403
404 if (lmp_le_capable(hdev)) {
405 settings |= MGMT_SETTING_LE;
406 settings |= MGMT_SETTING_ADVERTISING;
407 settings |= MGMT_SETTING_PRIVACY;
408 }
409
410 return settings;
411 }
412
413 static u32 get_current_settings(struct hci_dev *hdev)
414 {
415 u32 settings = 0;
416
417 if (hdev_is_powered(hdev))
418 settings |= MGMT_SETTING_POWERED;
419
420 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
421 settings |= MGMT_SETTING_CONNECTABLE;
422
423 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
424 settings |= MGMT_SETTING_FAST_CONNECTABLE;
425
426 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
427 settings |= MGMT_SETTING_DISCOVERABLE;
428
429 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
430 settings |= MGMT_SETTING_PAIRABLE;
431
432 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
433 settings |= MGMT_SETTING_BREDR;
434
435 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
436 settings |= MGMT_SETTING_LE;
437
438 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
439 settings |= MGMT_SETTING_LINK_SECURITY;
440
441 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
442 settings |= MGMT_SETTING_SSP;
443
444 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
445 settings |= MGMT_SETTING_HS;
446
447 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
448 settings |= MGMT_SETTING_ADVERTISING;
449
450 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
451 settings |= MGMT_SETTING_SECURE_CONN;
452
453 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
454 settings |= MGMT_SETTING_DEBUG_KEYS;
455
456 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
457 settings |= MGMT_SETTING_PRIVACY;
458
459 return settings;
460 }
461
462 #define PNP_INFO_SVCLASS_ID 0x1200
463
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, using at most @len bytes.  Returns a pointer just past the
 * written bytes.  The field type is EIR_UUID16_ALL, downgraded to
 * EIR_UUID16_SOME when the list is truncated for lack of space.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-bit UUID. */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits at bytes 12-13 of the stored UUID. */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte, grown below */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
505
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, using at most @len bytes.  Returns a pointer just past the
 * written bytes.  EIR_UUID32_ALL becomes EIR_UUID32_SOME on
 * truncation.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 32-bit UUID. */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte, grown below */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit portion sits at bytes 12-15 of the stored UUID. */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
538
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, using at most @len bytes.  Returns a pointer just past the
 * written bytes.  EIR_UUID128_ALL becomes EIR_UUID128_SOME on
 * truncation.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-byte UUID. */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID. */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte, grown below */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
571
572 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
573 {
574 struct pending_cmd *cmd;
575
576 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
577 if (cmd->opcode == opcode)
578 return cmd;
579 }
580
581 return NULL;
582 }
583
584 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
585 struct hci_dev *hdev,
586 const void *data)
587 {
588 struct pending_cmd *cmd;
589
590 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
591 if (cmd->user_data != data)
592 continue;
593 if (cmd->opcode == opcode)
594 return cmd;
595 }
596
597 return NULL;
598 }
599
/* Build the LE scan response payload into @ptr: currently just the
 * local device name, shortened (EIR_NAME_SHORT) when it does not fit.
 * Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* 2 bytes are reserved for the length/type header. */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* The length byte covers the type byte plus the name. */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
625
/* Queue an LE Set Scan Response Data command on @req, unless LE is
 * disabled or the freshly built data matches the cached copy.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
650
651 static u8 get_adv_discov_flags(struct hci_dev *hdev)
652 {
653 struct pending_cmd *cmd;
654
655 /* If there's a pending mgmt command the flags will not yet have
656 * their final values, so check for this first.
657 */
658 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
659 if (cmd) {
660 struct mgmt_mode *cp = cmd->param;
661 if (cp->val == 0x01)
662 return LE_AD_GENERAL;
663 else if (cp->val == 0x02)
664 return LE_AD_LIMITED;
665 } else {
666 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
667 return LE_AD_LIMITED;
668 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
669 return LE_AD_GENERAL;
670 }
671
672 return 0;
673 }
674
/* Build the LE advertising payload into @ptr: a flags entry
 * (discoverability plus BR/EDR support) and, when the controller
 * reports a valid value, the advertising TX power.  Returns the
 * number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags entry when at least one flag is set. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
706
/* Queue an LE Set Advertising Data command on @req, unless LE is
 * disabled or the freshly built data matches the cached copy.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
731
/* Build the extended inquiry response payload in @data: local name,
 * inquiry TX power, Device ID record and the three service UUID
 * lists, each guarded against the remaining space.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type: shorten names longer than 48 bytes. */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length: type byte plus the name itself. */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* 9-byte Device ID record: source, vendor, product, version */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* The UUID lists consume whatever space remains. */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
779
/* Queue a Write Extended Inquiry Response command on @req, unless a
 * precondition fails (powered, EIR-capable, SSP enabled, service
 * cache inactive) or the generated data matches the cached copy.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, EIR updates are deferred
	 * (see service_cache_off()).
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round-trip when nothing changed. */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
808
809 static u8 get_service_classes(struct hci_dev *hdev)
810 {
811 struct bt_uuid *uuid;
812 u8 val = 0;
813
814 list_for_each_entry(uuid, &hdev->uuids, list)
815 val |= uuid->svc_hint;
816
817 return val;
818 }
819
/* Queue a Write Class of Device command on @req reflecting the
 * major/minor class and service-class hints.  Skipped while
 * unpowered, BR/EDR-disabled, service-caching, or when the class is
 * unchanged.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 0x20 in the middle byte. */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round-trip when nothing changed. */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
848
849 static bool get_connectable(struct hci_dev *hdev)
850 {
851 struct pending_cmd *cmd;
852
853 /* If there's a pending mgmt command the flag will not yet have
854 * it's final value, so check for this first.
855 */
856 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
857 if (cmd) {
858 struct mgmt_mode *cp = cmd->param;
859 return cp->val;
860 }
861
862 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
863 }
864
/* Queue the HCI commands that (re)enable LE advertising with
 * parameters derived from the current connectable state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* Fixed advertising interval of 0x0800 units. */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
899
900 static void disable_advertising(struct hci_request *req)
901 {
902 u8 enable = 0x00;
903
904 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
905 }
906
/* Delayed work: when the service cache period ends, push the real EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Bail out if the cache was already disabled by someone else. */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	/* EIR/class state is read under the dev lock; the request
	 * itself runs after we drop it.
	 */
	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
927
/* Delayed work run when the resolvable private address times out:
 * flags it expired and, if advertising is on with no LE connection
 * up, restarts advertising so a fresh RPA is programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to refresh unless we are advertising, and an active
	 * LE link means the address must not change right now.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
953
/* One-time switch of @hdev into mgmt-controlled mode, triggered by
 * the first management command addressed to it.  Subsequent calls are
 * no-ops thanks to the test_and_set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
969
/* Read Controller Information command handler: replies with a
 * snapshot of address, version, manufacturer, class, names and
 * settings, taken under the dev lock for consistency.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
999
1000 static void mgmt_pending_free(struct pending_cmd *cmd)
1001 {
1002 sock_put(cmd->sk);
1003 kfree(cmd->param);
1004 kfree(cmd);
1005 }
1006
/* Allocate and enqueue a pending command entry on @hdev, copying @len
 * bytes of request parameters and taking a reference on @sk.
 * Returns the new entry, or NULL on allocation failure.  Released via
 * mgmt_pending_remove()/mgmt_pending_free().
 */
static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
					    struct hci_dev *hdev, void *data,
					    u16 len)
{
	struct pending_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->opcode = opcode;
	cmd->index = hdev->id;

	cmd->param = kmalloc(len, GFP_KERNEL);
	if (!cmd->param) {
		kfree(cmd);
		return NULL;
	}

	/* @data may be NULL when the command carries no parameters. */
	if (data)
		memcpy(cmd->param, data, len);

	cmd->sk = sk;
	sock_hold(sk);	/* reference dropped in mgmt_pending_free() */

	list_add(&cmd->list, &hdev->mgmt_pending);

	return cmd;
}
1036
1037 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1038 void (*cb)(struct pending_cmd *cmd,
1039 void *data),
1040 void *data)
1041 {
1042 struct pending_cmd *cmd, *tmp;
1043
1044 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1045 if (opcode > 0 && cmd->opcode != opcode)
1046 continue;
1047
1048 cb(cmd, data);
1049 }
1050 }
1051
1052 static void mgmt_pending_remove(struct pending_cmd *cmd)
1053 {
1054 list_del(&cmd->list);
1055 mgmt_pending_free(cmd);
1056 }
1057
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1059 {
1060 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1061
1062 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1063 sizeof(settings));
1064 }
1065
/* Completion callback for clean_up_hci_state(): once no connections
 * remain, power off immediately instead of waiting for the power-off
 * timeout queued in set_powered().
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1075
/* Queue the HCI commands needed to abort whatever discovery procedure
 * is currently running: BR/EDR inquiry, LE scan, name resolution, or
 * a passive background scan.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop both the disable timer and the scan. */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Cancel the in-flight remote name request, if any. */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1112
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan and advertising, abort
 * discovery, and tear down or reject every connection.  Returns the
 * hci_req_run() result (-ENODATA when nothing needed doing);
 * completion is reported through clean_up_hci_complete().
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links: disconnect cleanly. */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts: cancel them. */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests: reject them. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1165
/* Set Powered command handler: powers the controller up or down,
 * completing the mgmt command once the transition finishes (via the
 * pending-command entry added below).
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid modes. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* A device waiting for auto-power-off is still up; powering on
	 * just cancels the timer and reports powered immediately.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply with current settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1231
1232 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1233 struct sock *skip_sk)
1234 {
1235 struct sk_buff *skb;
1236 struct mgmt_hdr *hdr;
1237
1238 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1239 if (!skb)
1240 return -ENOMEM;
1241
1242 hdr = (void *) skb_put(skb, sizeof(*hdr));
1243 hdr->opcode = cpu_to_le16(event);
1244 if (hdev)
1245 hdr->index = cpu_to_le16(hdev->id);
1246 else
1247 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1248 hdr->len = cpu_to_le16(data_len);
1249
1250 if (data)
1251 memcpy(skb_put(skb, data_len), data, data_len);
1252
1253 /* Time stamp */
1254 __net_timestamp(skb);
1255
1256 hci_send_to_control(skb, skip_sk);
1257 kfree_skb(skb);
1258
1259 return 0;
1260 }
1261
1262 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1263 {
1264 __le32 ev;
1265
1266 ev = cpu_to_le32(get_current_settings(hdev));
1267
1268 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1269 }
1270
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(): records the target hdev, an optional status, and the
 * first responder's socket (with a held reference) so that it can later
 * be skipped when broadcasting the resulting New Settings event.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1276
1277 static void settings_rsp(struct pending_cmd *cmd, void *data)
1278 {
1279 struct cmd_lookup *match = data;
1280
1281 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1282
1283 list_del(&cmd->list);
1284
1285 if (match->sk == NULL) {
1286 match->sk = cmd->sk;
1287 sock_hold(match->sk);
1288 }
1289
1290 mgmt_pending_free(cmd);
1291 }
1292
1293 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1294 {
1295 u8 *status = data;
1296
1297 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1298 mgmt_pending_remove(cmd);
1299 }
1300
1301 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1302 {
1303 if (!lmp_bredr_capable(hdev))
1304 return MGMT_STATUS_NOT_SUPPORTED;
1305 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1306 return MGMT_STATUS_REJECTED;
1307 else
1308 return MGMT_STATUS_SUCCESS;
1309 }
1310
1311 static u8 mgmt_le_support(struct hci_dev *hdev)
1312 {
1313 if (!lmp_le_capable(hdev))
1314 return MGMT_STATUS_NOT_SUPPORTED;
1315 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1316 return MGMT_STATUS_REJECTED;
1317 else
1318 return MGMT_STATUS_SUCCESS;
1319 }
1320
/* Request-completion callback for Set Discoverable.
 *
 * On HCI failure, report the error to the issuing socket and roll back
 * the HCI_LIMITED_DISCOVERABLE flag that set_discoverable() set
 * optimistically. On success, sync HCI_DISCOVERABLE with the requested
 * value, arm the discoverable timeout (if one was configured), respond
 * with the new settings and broadcast New Settings when the flag
 * actually changed. Finally refresh the class of device so its limited
 * discoverable bit stays accurate.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timer that turns discoverable back off;
		 * discov_timeout was stored by set_discoverable().
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1377
/* Handle the Set Discoverable mgmt command.
 *
 * cp->val selects the mode: 0x00 off, 0x01 general discoverable, 0x02
 * limited discoverable; cp->timeout is an auto-off time in seconds
 * (required for limited mode, forbidden when disabling).
 *
 * When the controller is powered off, only the host-side flag is
 * toggled. Otherwise an HCI request is built: BR/EDR controllers get
 * their IAC list and scan-enable programmed, LE-only controllers just
 * refresh their advertising data. The result is finalized in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Serialize against other scan-enable affecting operations */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1542
1543 static void write_fast_connectable(struct hci_request *req, bool enable)
1544 {
1545 struct hci_dev *hdev = req->hdev;
1546 struct hci_cp_write_page_scan_activity acp;
1547 u8 type;
1548
1549 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1550 return;
1551
1552 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1553 return;
1554
1555 if (enable) {
1556 type = PAGE_SCAN_TYPE_INTERLACED;
1557
1558 /* 160 msec page scan interval */
1559 acp.interval = cpu_to_le16(0x0100);
1560 } else {
1561 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1562
1563 /* default 1.28 sec page scan */
1564 acp.interval = cpu_to_le16(0x0800);
1565 }
1566
1567 acp.window = cpu_to_le16(0x0012);
1568
1569 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1570 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1571 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1572 sizeof(acp), &acp);
1573
1574 if (hdev->page_scan_type != type)
1575 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1576 }
1577
1578 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1579 {
1580 struct pending_cmd *cmd;
1581 struct mgmt_mode *cp;
1582 bool changed;
1583
1584 BT_DBG("status 0x%02x", status);
1585
1586 hci_dev_lock(hdev);
1587
1588 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1589 if (!cmd)
1590 goto unlock;
1591
1592 if (status) {
1593 u8 mgmt_err = mgmt_status(status);
1594 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1595 goto remove_cmd;
1596 }
1597
1598 cp = cmd->param;
1599 if (cp->val)
1600 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1601 else
1602 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1603
1604 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1605
1606 if (changed)
1607 new_settings(hdev, cmd->sk);
1608
1609 remove_cmd:
1610 mgmt_pending_remove(cmd);
1611
1612 unlock:
1613 hci_dev_unlock(hdev);
1614 }
1615
1616 static int set_connectable_update_settings(struct hci_dev *hdev,
1617 struct sock *sk, u8 val)
1618 {
1619 bool changed = false;
1620 int err;
1621
1622 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1623 changed = true;
1624
1625 if (val) {
1626 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1627 } else {
1628 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1629 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1630 }
1631
1632 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1633 if (err < 0)
1634 return err;
1635
1636 if (changed)
1637 return new_settings(hdev, sk);
1638
1639 return 0;
1640 }
1641
/* Handle the Set Connectable mgmt command.
 *
 * When powered off, only the host flag is updated via
 * set_connectable_update_settings(). Otherwise an HCI request is
 * built: LE-only controllers refresh their advertising data (clearing
 * discoverable flags when disabling), BR/EDR controllers toggle page
 * scan, fast connectable parameters are reset where needed, and
 * undirected advertising is restarted to pick up the changed flags.
 * Completion is handled in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Serialize against other scan-enable affecting operations */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Only touch scan enable when it would actually change */
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Disabling page scan makes the discoverable
			 * timeout moot, so cancel it.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it reflects the new connectable state,
	 * but only while no LE connection is using the controller.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing needed to be sent, so finish the
		 * operation purely on the host side.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1736
1737 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1738 u16 len)
1739 {
1740 struct mgmt_mode *cp = data;
1741 bool changed;
1742 int err;
1743
1744 BT_DBG("request for %s", hdev->name);
1745
1746 if (cp->val != 0x00 && cp->val != 0x01)
1747 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1748 MGMT_STATUS_INVALID_PARAMS);
1749
1750 hci_dev_lock(hdev);
1751
1752 if (cp->val)
1753 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1754 else
1755 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1756
1757 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1758 if (err < 0)
1759 goto unlock;
1760
1761 if (changed)
1762 err = new_settings(hdev, sk);
1763
1764 unlock:
1765 hci_dev_unlock(hdev);
1766 return err;
1767 }
1768
/* Handle the Set Link Security mgmt command (BR/EDR authentication).
 *
 * When powered off, only the HCI_LINK_SECURITY host flag is toggled.
 * When powered, the setting is pushed to the controller via
 * HCI_OP_WRITE_AUTH_ENABLE unless the HCI_AUTH flag already matches,
 * with a pending command tracking the response.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Powered off: just track the desired state in dev_flags */
		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1838
/* Handle the Set Secure Simple Pairing mgmt command.
 *
 * When powered off, HCI_SSP_ENABLED is toggled host-side; disabling
 * SSP also disables High Speed since HS depends on SSP. When powered,
 * the new mode is sent via HCI_OP_WRITE_SSP_MODE (first turning off
 * SSP debug mode if it was in use and SSP is being disabled), with a
 * pending command tracking the response.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also takes HS down with it; count
			 * either flag flipping as a change.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested mode: respond without HCI traffic */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off while debug keys are in use also turns off
	 * SSP debug mode (cp->val is 0x00 here).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1920
/* Handle the Set High Speed mgmt command.
 *
 * HS is a host-only flag layered on SSP, so no HCI traffic is needed.
 * Enabling is always allowed (given BR/EDR, SSP capability and SSP
 * enabled); disabling is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1971
/* Completion callback for the Set LE HCI request.
 *
 * On failure every pending Set LE command is failed with the mapped
 * status. On success all pending commands get a settings response, a
 * New Settings event is broadcast (skipping the first responder), and
 * - if LE ended up enabled - the advertising and scan response data
 * are refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2009
/* Handle the Set Low Energy mgmt command.
 *
 * When the controller is powered off, or the host LE support already
 * matches the request, only the HCI_LE_ENABLED (and, when disabling,
 * HCI_ADVERTISING) flags are updated. Otherwise the new mode is pushed
 * via HCI_OP_WRITE_LE_HOST_SUPPORTED in a request that also disables
 * advertising first when LE is being turned off; completion is handled
 * in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also turns advertising off */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against operations that also touch LE state */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must stop before LE host support goes away */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2098
2099 /* This is a helper function to test for pending mgmt commands that can
2100 * cause CoD or EIR HCI commands. We can only allow one such pending
2101 * mgmt command at a time since otherwise we cannot easily track what
2102 * the current values are, will be, and based on that calculate if a new
2103 * HCI command needs to be sent and if yes with what value.
2104 */
2105 static bool pending_eir_or_class(struct hci_dev *hdev)
2106 {
2107 struct pending_cmd *cmd;
2108
2109 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2110 switch (cmd->opcode) {
2111 case MGMT_OP_ADD_UUID:
2112 case MGMT_OP_REMOVE_UUID:
2113 case MGMT_OP_SET_DEV_CLASS:
2114 case MGMT_OP_SET_POWERED:
2115 return true;
2116 }
2117 }
2118
2119 return false;
2120 }
2121
/* Bluetooth Base UUID in little-endian byte order; 16 and 32 bit UUIDs
 * are aliases of it that only differ in bytes 12-15 (see
 * get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2126
2127 static u8 get_uuid_size(const u8 *uuid)
2128 {
2129 u32 val;
2130
2131 if (memcmp(uuid, bluetooth_base_uuid, 12))
2132 return 128;
2133
2134 val = get_unaligned_le32(&uuid[12]);
2135 if (val > 0xffff)
2136 return 32;
2137
2138 return 16;
2139 }
2140
2141 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2142 {
2143 struct pending_cmd *cmd;
2144
2145 hci_dev_lock(hdev);
2146
2147 cmd = mgmt_pending_find(mgmt_op, hdev);
2148 if (!cmd)
2149 goto unlock;
2150
2151 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2152 hdev->dev_class, 3);
2153
2154 mgmt_pending_remove(cmd);
2155
2156 unlock:
2157 hci_dev_unlock(hdev);
2158 }
2159
/* Request-completion hook for Add UUID: defer to the common class/EIR
 * completion handling.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2166
/* Handle the Add UUID mgmt command: record the UUID in hdev->uuids and
 * kick off an HCI request updating the class of device and EIR data.
 *
 * If the request produced no HCI traffic (-ENODATA, e.g. powered off)
 * the command is completed immediately; otherwise a pending command is
 * added and add_uuid_complete() finishes it.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2224
2225 static bool enable_service_cache(struct hci_dev *hdev)
2226 {
2227 if (!hdev_is_powered(hdev))
2228 return false;
2229
2230 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2231 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2232 CACHE_TIMEOUT);
2233 return true;
2234 }
2235
2236 return false;
2237 }
2238
/* Request-completion hook for Remove UUID: defer to the common
 * class/EIR completion handling.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2245
/* Handle the Remove UUID mgmt command.
 *
 * An all-zero UUID clears the entire list (re-enabling the service
 * cache when possible, which defers the HCI update); otherwise every
 * matching entry is removed, failing with INVALID_PARAMS when none
 * matched. Class of device and EIR are then refreshed via an HCI
 * request, completed either immediately (-ENODATA) or through
 * remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero wildcard UUID clears the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache re-armed, the HCI update is
		 * deferred and the command can complete right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2323
/* Request-completion hook for Set Device Class: defer to the common
 * class/EIR completion handling.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2330
/* Handle the Set Device Class mgmt command: store the major/minor
 * class and, when powered, push the new class (and, if the service
 * cache was active, refreshed EIR data) to the controller.
 *
 * The low two bits of minor and the high three bits of major are
 * reserved and must be zero. Completion happens immediately when
 * powered off or when the request needed no HCI traffic (-ENODATA),
 * otherwise via set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values that set reserved bits of the class of device */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache synchronously (dropping the lock
	 * while the work is cancelled) so the EIR update reflects the
	 * full UUID list.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed: complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2401
2402 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2403 u16 len)
2404 {
2405 struct mgmt_cp_load_link_keys *cp = data;
2406 u16 key_count, expected_len;
2407 bool changed;
2408 int i;
2409
2410 BT_DBG("request for %s", hdev->name);
2411
2412 if (!lmp_bredr_capable(hdev))
2413 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2414 MGMT_STATUS_NOT_SUPPORTED);
2415
2416 key_count = __le16_to_cpu(cp->key_count);
2417
2418 expected_len = sizeof(*cp) + key_count *
2419 sizeof(struct mgmt_link_key_info);
2420 if (expected_len != len) {
2421 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2422 expected_len, len);
2423 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2424 MGMT_STATUS_INVALID_PARAMS);
2425 }
2426
2427 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2428 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2429 MGMT_STATUS_INVALID_PARAMS);
2430
2431 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2432 key_count);
2433
2434 for (i = 0; i < key_count; i++) {
2435 struct mgmt_link_key_info *key = &cp->keys[i];
2436
2437 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2438 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2439 MGMT_STATUS_INVALID_PARAMS);
2440 }
2441
2442 hci_dev_lock(hdev);
2443
2444 hci_link_keys_clear(hdev);
2445
2446 if (cp->debug_keys)
2447 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2448 &hdev->dev_flags);
2449 else
2450 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2451 &hdev->dev_flags);
2452
2453 if (changed)
2454 new_settings(hdev, NULL);
2455
2456 for (i = 0; i < key_count; i++) {
2457 struct mgmt_link_key_info *key = &cp->keys[i];
2458
2459 /* Always ignore debug keys and require a new pairing if
2460 * the user wants to use them.
2461 */
2462 if (key->type == HCI_LK_DEBUG_COMBINATION)
2463 continue;
2464
2465 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2466 key->type, key->pin_len, NULL);
2467 }
2468
2469 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2470
2471 hci_dev_unlock(hdev);
2472
2473 return 0;
2474 }
2475
2476 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2477 u8 addr_type, struct sock *skip_sk)
2478 {
2479 struct mgmt_ev_device_unpaired ev;
2480
2481 bacpy(&ev.addr.bdaddr, bdaddr);
2482 ev.addr.type = addr_type;
2483
2484 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2485 skip_sk);
2486 }
2487
2488 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2489 u16 len)
2490 {
2491 struct mgmt_cp_unpair_device *cp = data;
2492 struct mgmt_rp_unpair_device rp;
2493 struct hci_cp_disconnect dc;
2494 struct pending_cmd *cmd;
2495 struct hci_conn *conn;
2496 int err;
2497
2498 memset(&rp, 0, sizeof(rp));
2499 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2500 rp.addr.type = cp->addr.type;
2501
2502 if (!bdaddr_type_is_valid(cp->addr.type))
2503 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2504 MGMT_STATUS_INVALID_PARAMS,
2505 &rp, sizeof(rp));
2506
2507 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2508 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2509 MGMT_STATUS_INVALID_PARAMS,
2510 &rp, sizeof(rp));
2511
2512 hci_dev_lock(hdev);
2513
2514 if (!hdev_is_powered(hdev)) {
2515 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2516 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2517 goto unlock;
2518 }
2519
2520 if (cp->addr.type == BDADDR_BREDR) {
2521 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2522 } else {
2523 u8 addr_type;
2524
2525 if (cp->addr.type == BDADDR_LE_PUBLIC)
2526 addr_type = ADDR_LE_DEV_PUBLIC;
2527 else
2528 addr_type = ADDR_LE_DEV_RANDOM;
2529
2530 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2531
2532 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2533
2534 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2535 }
2536
2537 if (err < 0) {
2538 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2539 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2540 goto unlock;
2541 }
2542
2543 if (cp->disconnect) {
2544 if (cp->addr.type == BDADDR_BREDR)
2545 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2546 &cp->addr.bdaddr);
2547 else
2548 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2549 &cp->addr.bdaddr);
2550 } else {
2551 conn = NULL;
2552 }
2553
2554 if (!conn) {
2555 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2556 &rp, sizeof(rp));
2557 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2558 goto unlock;
2559 }
2560
2561 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2562 sizeof(*cp));
2563 if (!cmd) {
2564 err = -ENOMEM;
2565 goto unlock;
2566 }
2567
2568 dc.handle = cpu_to_le16(conn->handle);
2569 dc.reason = 0x13; /* Remote User Terminated Connection */
2570 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2571 if (err < 0)
2572 mgmt_pending_remove(cmd);
2573
2574 unlock:
2575 hci_dev_unlock(hdev);
2576 return err;
2577 }
2578
/* Disconnect command handler.
 *
 * Looks up the ACL or LE connection for the given address and issues an
 * HCI Disconnect with reason "Remote User Terminated Connection". The
 * mgmt reply is deferred to the pending command, completed when the
 * disconnect event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to disconnect yet */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2643
2644 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2645 {
2646 switch (link_type) {
2647 case LE_LINK:
2648 switch (addr_type) {
2649 case ADDR_LE_DEV_PUBLIC:
2650 return BDADDR_LE_PUBLIC;
2651
2652 default:
2653 /* Fallback to LE Random address type */
2654 return BDADDR_LE_RANDOM;
2655 }
2656
2657 default:
2658 /* Fallback to BR/EDR type */
2659 return BDADDR_BREDR;
2660 }
2661 }
2662
2663 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2664 u16 data_len)
2665 {
2666 struct mgmt_rp_get_connections *rp;
2667 struct hci_conn *c;
2668 size_t rp_len;
2669 int err;
2670 u16 i;
2671
2672 BT_DBG("");
2673
2674 hci_dev_lock(hdev);
2675
2676 if (!hdev_is_powered(hdev)) {
2677 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2678 MGMT_STATUS_NOT_POWERED);
2679 goto unlock;
2680 }
2681
2682 i = 0;
2683 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2684 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2685 i++;
2686 }
2687
2688 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2689 rp = kmalloc(rp_len, GFP_KERNEL);
2690 if (!rp) {
2691 err = -ENOMEM;
2692 goto unlock;
2693 }
2694
2695 i = 0;
2696 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2697 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2698 continue;
2699 bacpy(&rp->addr[i].bdaddr, &c->dst);
2700 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2701 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2702 continue;
2703 i++;
2704 }
2705
2706 rp->conn_count = cpu_to_le16(i);
2707
2708 /* Recalculate length in case of filtered SCO connections, etc */
2709 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2710
2711 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2712 rp_len);
2713
2714 kfree(rp);
2715
2716 unlock:
2717 hci_dev_unlock(hdev);
2718 return err;
2719 }
2720
2721 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2722 struct mgmt_cp_pin_code_neg_reply *cp)
2723 {
2724 struct pending_cmd *cmd;
2725 int err;
2726
2727 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2728 sizeof(*cp));
2729 if (!cmd)
2730 return -ENOMEM;
2731
2732 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2733 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2734 if (err < 0)
2735 mgmt_pending_remove(cmd);
2736
2737 return err;
2738 }
2739
/* PIN Code Reply command handler.
 *
 * Forwards the user-supplied PIN to the controller for the matching
 * ACL connection. If the connection requires a 16-byte PIN (high
 * security) and the supplied one is shorter, a negative reply is sent
 * to the controller instead and the command fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-digit PIN; reject anything
	 * shorter by negatively replying to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2799
2800 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2801 u16 len)
2802 {
2803 struct mgmt_cp_set_io_capability *cp = data;
2804
2805 BT_DBG("");
2806
2807 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2808 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2809 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2810
2811 hci_dev_lock(hdev);
2812
2813 hdev->io_capability = cp->io_capability;
2814
2815 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2816 hdev->io_capability);
2817
2818 hci_dev_unlock(hdev);
2819
2820 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2821 0);
2822 }
2823
2824 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2825 {
2826 struct hci_dev *hdev = conn->hdev;
2827 struct pending_cmd *cmd;
2828
2829 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2830 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2831 continue;
2832
2833 if (cmd->user_data != conn)
2834 continue;
2835
2836 return cmd;
2837 }
2838
2839 return NULL;
2840 }
2841
/* Finish a Pair Device command: reply to userspace with the given
 * status, detach the connection callbacks, drop the pairing's
 * connection reference and discard the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the reference taken when the pairing was started */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2862
2863 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2864 {
2865 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2866 struct pending_cmd *cmd;
2867
2868 cmd = find_pairing(conn);
2869 if (cmd)
2870 pairing_complete(cmd, status);
2871 }
2872
2873 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2874 {
2875 struct pending_cmd *cmd;
2876
2877 BT_DBG("status %u", status);
2878
2879 cmd = find_pairing(conn);
2880 if (!cmd)
2881 BT_DBG("Unable to find a pending command");
2882 else
2883 pairing_complete(cmd, mgmt_status(status));
2884 }
2885
2886 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2887 {
2888 struct pending_cmd *cmd;
2889
2890 BT_DBG("status %u", status);
2891
2892 if (!status)
2893 return;
2894
2895 cmd = find_pairing(conn);
2896 if (!cmd)
2897 BT_DBG("Unable to find a pending command");
2898 else
2899 pairing_complete(cmd, mgmt_status(status));
2900 }
2901
/* Pair Device command handler.
 *
 * Initiates an ACL (BR/EDR) or LE connection with dedicated-bonding
 * authentication and installs pairing callbacks that complete the
 * pending mgmt command once pairing finishes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means some other pairing/flow already
	 * owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3019
/* Cancel Pair Device command handler.
 *
 * Aborts the pending Pair Device command whose connection matches the
 * supplied address, completing it with status Cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3061
/* Common backend for the user confirmation / passkey (negative) reply
 * commands.
 *
 * For LE addresses the response is routed to SMP and answered
 * immediately; for BR/EDR it is forwarded to the controller as the
 * given hci_op, with the mgmt reply deferred to the pending command.
 * The passkey argument is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE responses go through SMP rather than HCI */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3129
3130 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3131 void *data, u16 len)
3132 {
3133 struct mgmt_cp_pin_code_neg_reply *cp = data;
3134
3135 BT_DBG("");
3136
3137 return user_pairing_resp(sk, hdev, &cp->addr,
3138 MGMT_OP_PIN_CODE_NEG_REPLY,
3139 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3140 }
3141
3142 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3143 u16 len)
3144 {
3145 struct mgmt_cp_user_confirm_reply *cp = data;
3146
3147 BT_DBG("");
3148
3149 if (len != sizeof(*cp))
3150 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3151 MGMT_STATUS_INVALID_PARAMS);
3152
3153 return user_pairing_resp(sk, hdev, &cp->addr,
3154 MGMT_OP_USER_CONFIRM_REPLY,
3155 HCI_OP_USER_CONFIRM_REPLY, 0);
3156 }
3157
3158 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3159 void *data, u16 len)
3160 {
3161 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3162
3163 BT_DBG("");
3164
3165 return user_pairing_resp(sk, hdev, &cp->addr,
3166 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3167 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3168 }
3169
3170 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3171 u16 len)
3172 {
3173 struct mgmt_cp_user_passkey_reply *cp = data;
3174
3175 BT_DBG("");
3176
3177 return user_pairing_resp(sk, hdev, &cp->addr,
3178 MGMT_OP_USER_PASSKEY_REPLY,
3179 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3180 }
3181
3182 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3183 void *data, u16 len)
3184 {
3185 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3186
3187 BT_DBG("");
3188
3189 return user_pairing_resp(sk, hdev, &cp->addr,
3190 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3191 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3192 }
3193
3194 static void update_name(struct hci_request *req)
3195 {
3196 struct hci_dev *hdev = req->hdev;
3197 struct hci_cp_write_local_name cp;
3198
3199 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3200
3201 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3202 }
3203
3204 static void set_name_complete(struct hci_dev *hdev, u8 status)
3205 {
3206 struct mgmt_cp_set_local_name *cp;
3207 struct pending_cmd *cmd;
3208
3209 BT_DBG("status 0x%02x", status);
3210
3211 hci_dev_lock(hdev);
3212
3213 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3214 if (!cmd)
3215 goto unlock;
3216
3217 cp = cmd->param;
3218
3219 if (status)
3220 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3221 mgmt_status(status));
3222 else
3223 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3224 cp, sizeof(*cp));
3225
3226 mgmt_pending_remove(cmd);
3227
3228 unlock:
3229 hci_dev_unlock(hdev);
3230 }
3231
/* Set Local Name command handler.
 *
 * Stores the new name and short name. When powered, an HCI request
 * updates the controller's local name, EIR data (BR/EDR) and scan
 * response data (LE); when unpowered the change is only recorded and a
 * Local Name Changed event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3300
3301 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3302 void *data, u16 data_len)
3303 {
3304 struct pending_cmd *cmd;
3305 int err;
3306
3307 BT_DBG("%s", hdev->name);
3308
3309 hci_dev_lock(hdev);
3310
3311 if (!hdev_is_powered(hdev)) {
3312 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3313 MGMT_STATUS_NOT_POWERED);
3314 goto unlock;
3315 }
3316
3317 if (!lmp_ssp_capable(hdev)) {
3318 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3319 MGMT_STATUS_NOT_SUPPORTED);
3320 goto unlock;
3321 }
3322
3323 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3324 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3325 MGMT_STATUS_BUSY);
3326 goto unlock;
3327 }
3328
3329 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3330 if (!cmd) {
3331 err = -ENOMEM;
3332 goto unlock;
3333 }
3334
3335 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3336 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3337 0, NULL);
3338 else
3339 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3340
3341 if (err < 0)
3342 mgmt_pending_remove(cmd);
3343
3344 unlock:
3345 hci_dev_unlock(hdev);
3346 return err;
3347 }
3348
/* Add Remote OOB Data command handler.
 *
 * The payload length selects between the legacy variant (192-bit hash
 * and randomizer only) and the extended variant carrying both 192-bit
 * and 256-bit values for Secure Connections; any other length is
 * rejected as invalid parameters.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3396
3397 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3398 void *data, u16 len)
3399 {
3400 struct mgmt_cp_remove_remote_oob_data *cp = data;
3401 u8 status;
3402 int err;
3403
3404 BT_DBG("%s", hdev->name);
3405
3406 hci_dev_lock(hdev);
3407
3408 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3409 if (err < 0)
3410 status = MGMT_STATUS_INVALID_PARAMS;
3411 else
3412 status = MGMT_STATUS_SUCCESS;
3413
3414 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3415 status, &cp->addr, sizeof(cp->addr));
3416
3417 hci_dev_unlock(hdev);
3418 return err;
3419 }
3420
3421 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3422 {
3423 struct pending_cmd *cmd;
3424 u8 type;
3425 int err;
3426
3427 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3428
3429 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3430 if (!cmd)
3431 return -ENOENT;
3432
3433 type = hdev->discovery.type;
3434
3435 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3436 &type, sizeof(type));
3437 mgmt_pending_remove(cmd);
3438
3439 return err;
3440 }
3441
/* hci_request completion callback for Start Discovery.
 *
 * On success, moves the discovery state machine to FINDING and, for
 * LE-based discovery types, schedules the LE scan-disable work to stop
 * scanning after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own; no LE timeout */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3480
/* Start Discovery command handler.
 *
 * Builds and submits the HCI request for the requested discovery type:
 * a classic inquiry for BR/EDR, or an active LE scan (with random
 * address update and optional background-scan suspension) for LE and
 * interleaved discovery. The mgmt reply is deferred to the pending
 * command, completed from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery needs BR/EDR in addition to LE */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Cannot scan while advertising on this controller */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3629
3630 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3631 {
3632 struct pending_cmd *cmd;
3633 int err;
3634
3635 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3636 if (!cmd)
3637 return -ENOENT;
3638
3639 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3640 &hdev->discovery.type, sizeof(hdev->discovery.type));
3641 mgmt_pending_remove(cmd);
3642
3643 return err;
3644 }
3645
3646 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3647 {
3648 BT_DBG("status %d", status);
3649
3650 hci_dev_lock(hdev);
3651
3652 if (status) {
3653 mgmt_stop_discovery_failed(hdev, status);
3654 goto unlock;
3655 }
3656
3657 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3658
3659 unlock:
3660 hci_dev_unlock(hdev);
3661 }
3662
/* Stop Discovery command handler.
 *
 * Rejects the request when no discovery is active, and returns
 * Invalid Params when the supplied type does not match the discovery
 * type that was started.  Otherwise queues the HCI commands needed to
 * stop discovery and answers from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	/* Request failed to run: the pending entry must be dropped
	 * before replying directly.
	 */
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3718
/* Confirm Name command handler.
 *
 * User space tells us whether the remote name of a discovered device is
 * already known.  Known names are removed from the name-resolve list;
 * unknown ones are marked NAME_NEEDED so name resolution is scheduled.
 * Only valid while discovery is active and the address is present in
 * the inquiry cache with an unknown name state.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3760
3761 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3762 u16 len)
3763 {
3764 struct mgmt_cp_block_device *cp = data;
3765 u8 status;
3766 int err;
3767
3768 BT_DBG("%s", hdev->name);
3769
3770 if (!bdaddr_type_is_valid(cp->addr.type))
3771 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3772 MGMT_STATUS_INVALID_PARAMS,
3773 &cp->addr, sizeof(cp->addr));
3774
3775 hci_dev_lock(hdev);
3776
3777 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3778 if (err < 0) {
3779 status = MGMT_STATUS_FAILED;
3780 goto done;
3781 }
3782
3783 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3784 sk);
3785 status = MGMT_STATUS_SUCCESS;
3786
3787 done:
3788 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3789 &cp->addr, sizeof(cp->addr));
3790
3791 hci_dev_unlock(hdev);
3792
3793 return err;
3794 }
3795
3796 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3797 u16 len)
3798 {
3799 struct mgmt_cp_unblock_device *cp = data;
3800 u8 status;
3801 int err;
3802
3803 BT_DBG("%s", hdev->name);
3804
3805 if (!bdaddr_type_is_valid(cp->addr.type))
3806 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3807 MGMT_STATUS_INVALID_PARAMS,
3808 &cp->addr, sizeof(cp->addr));
3809
3810 hci_dev_lock(hdev);
3811
3812 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3813 if (err < 0) {
3814 status = MGMT_STATUS_INVALID_PARAMS;
3815 goto done;
3816 }
3817
3818 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3819 sk);
3820 status = MGMT_STATUS_SUCCESS;
3821
3822 done:
3823 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3824 &cp->addr, sizeof(cp->addr));
3825
3826 hci_dev_unlock(hdev);
3827
3828 return err;
3829 }
3830
3831 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3832 u16 len)
3833 {
3834 struct mgmt_cp_set_device_id *cp = data;
3835 struct hci_request req;
3836 int err;
3837 __u16 source;
3838
3839 BT_DBG("%s", hdev->name);
3840
3841 source = __le16_to_cpu(cp->source);
3842
3843 if (source > 0x0002)
3844 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3845 MGMT_STATUS_INVALID_PARAMS);
3846
3847 hci_dev_lock(hdev);
3848
3849 hdev->devid_source = source;
3850 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3851 hdev->devid_product = __le16_to_cpu(cp->product);
3852 hdev->devid_version = __le16_to_cpu(cp->version);
3853
3854 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3855
3856 hci_req_init(&req, hdev);
3857 update_eir(&req);
3858 hci_req_run(&req, NULL);
3859
3860 hci_dev_unlock(hdev);
3861
3862 return err;
3863 }
3864
3865 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3866 {
3867 struct cmd_lookup match = { NULL, hdev };
3868
3869 if (status) {
3870 u8 mgmt_err = mgmt_status(status);
3871
3872 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3873 cmd_status_rsp, &mgmt_err);
3874 return;
3875 }
3876
3877 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3878 &match);
3879
3880 new_settings(hdev, match.sk);
3881
3882 if (match.sk)
3883 sock_put(match.sk);
3884 }
3885
/* Set Advertising command handler.
 *
 * Toggles the HCI_ADVERTISING setting.  When the controller is off,
 * the value is unchanged, or LE connections exist, only the flag is
 * updated and a direct mgmt response is sent; otherwise an HCI request
 * to enable/disable advertising is queued and answered from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A concurrent Set Advertising or Set LE must finish first. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3963
/* Set Static Address command handler.
 *
 * Stores a new LE static random address.  Only allowed while the
 * controller is powered off.  BDADDR_ANY is accepted (it disables the
 * static address); any other value must be a well-formed static random
 * address: not BDADDR_NONE and with the two most significant bits set.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4003
/* Set Scan Parameters command handler.
 *
 * Validates and stores the LE scan interval and window (both must be
 * in the range 0x0004-0x4000 and the window must not exceed the
 * interval).  If background scanning is currently running and no
 * discovery is in progress, the scan is restarted so the new
 * parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4059
/* HCI request callback for Set Fast Connectable.
 *
 * On success the HCI_FAST_CONNECTABLE flag is updated to match the
 * requested value (taken from the pending command's parameters) and a
 * settings response plus New Settings event are sent; on error only a
 * status response is sent.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4092
/* Set Fast Connectable command handler.
 *
 * Requires BR/EDR to be enabled, controller version >= 1.2, the
 * controller to be powered and connectable.  Queues a write of the
 * fast-connectable page scan parameters; the flag itself is only
 * toggled from fast_connectable_complete() once the request succeeds.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just echo the current settings back. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4157
4158 static void set_bredr_scan(struct hci_request *req)
4159 {
4160 struct hci_dev *hdev = req->hdev;
4161 u8 scan = 0;
4162
4163 /* Ensure that fast connectable is disabled. This function will
4164 * not do anything if the page scan parameters are already what
4165 * they should be.
4166 */
4167 write_fast_connectable(req, false);
4168
4169 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4170 scan |= SCAN_PAGE;
4171 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4172 scan |= SCAN_INQUIRY;
4173
4174 if (scan)
4175 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4176 }
4177
/* HCI request callback for Set BR/EDR.
 *
 * set_bredr() sets HCI_BREDR_ENABLED optimistically before running the
 * request, so on failure the flag must be cleared again here before
 * reporting the error.  On success a settings response and New
 * Settings event are sent.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4209
/* Set BR/EDR command handler.
 *
 * Enables or disables BR/EDR support on a dual-mode (BR/EDR + LE)
 * controller.  Requires LE to be enabled.  While powered on, disabling
 * BR/EDR is rejected; enabling it queues an HCI request that updates
 * scan settings and advertising data, completed in set_bredr_complete().
 * While powered off the flag (and, when disabling, the dependent
 * BR/EDR-only settings) is changed directly.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings back. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4299
/* Set Secure Connections command handler.
 *
 * Accepts 0x00 (off), 0x01 (on) and 0x02 (SC-only mode).  Requires
 * BR/EDR support and either controller SC capability or the force-SC
 * debugfs flag.  While powered off the flags are changed directly;
 * while powered on an HCI Write SC Support command is sent and the
 * SC-only flag is updated once it has been queued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both the enabled state and the SC-only mode
	 * already match the request.
	 */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4387
/* Set Debug Keys command handler.
 *
 * 0x00 disables keeping debug keys, 0x01 keeps them, and 0x02
 * additionally enables generation/use of debug keys (HCI SSP debug
 * mode).  If the controller is powered with SSP enabled and the "use"
 * state changed, the new SSP debug mode is written to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4435
/* Set Privacy command handler.
 *
 * Enables or disables LE privacy and stores the local IRK supplied by
 * user space.  Only allowed while the controller is powered off.
 * Enabling privacy marks the RPA as expired so a fresh resolvable
 * private address is generated on power-on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4485
4486 static bool irk_is_valid(struct mgmt_irk_info *irk)
4487 {
4488 switch (irk->addr.type) {
4489 case BDADDR_LE_PUBLIC:
4490 return true;
4491
4492 case BDADDR_LE_RANDOM:
4493 /* Two most significant bits shall be set */
4494 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4495 return false;
4496 return true;
4497 }
4498
4499 return false;
4500 }
4501
4502 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4503 u16 len)
4504 {
4505 struct mgmt_cp_load_irks *cp = cp_data;
4506 u16 irk_count, expected_len;
4507 int i, err;
4508
4509 BT_DBG("request for %s", hdev->name);
4510
4511 if (!lmp_le_capable(hdev))
4512 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4513 MGMT_STATUS_NOT_SUPPORTED);
4514
4515 irk_count = __le16_to_cpu(cp->irk_count);
4516
4517 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4518 if (expected_len != len) {
4519 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4520 expected_len, len);
4521 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4522 MGMT_STATUS_INVALID_PARAMS);
4523 }
4524
4525 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4526
4527 for (i = 0; i < irk_count; i++) {
4528 struct mgmt_irk_info *key = &cp->irks[i];
4529
4530 if (!irk_is_valid(key))
4531 return cmd_status(sk, hdev->id,
4532 MGMT_OP_LOAD_IRKS,
4533 MGMT_STATUS_INVALID_PARAMS);
4534 }
4535
4536 hci_dev_lock(hdev);
4537
4538 hci_smp_irks_clear(hdev);
4539
4540 for (i = 0; i < irk_count; i++) {
4541 struct mgmt_irk_info *irk = &cp->irks[i];
4542 u8 addr_type;
4543
4544 if (irk->addr.type == BDADDR_LE_PUBLIC)
4545 addr_type = ADDR_LE_DEV_PUBLIC;
4546 else
4547 addr_type = ADDR_LE_DEV_RANDOM;
4548
4549 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4550 BDADDR_ANY);
4551 }
4552
4553 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4554
4555 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4556
4557 hci_dev_unlock(hdev);
4558
4559 return err;
4560 }
4561
4562 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4563 {
4564 if (key->master != 0x00 && key->master != 0x01)
4565 return false;
4566
4567 switch (key->addr.type) {
4568 case BDADDR_LE_PUBLIC:
4569 return true;
4570
4571 case BDADDR_LE_RANDOM:
4572 /* Two most significant bits shall be set */
4573 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4574 return false;
4575 return true;
4576 }
4577
4578 return false;
4579 }
4580
4581 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4582 void *cp_data, u16 len)
4583 {
4584 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4585 u16 key_count, expected_len;
4586 int i, err;
4587
4588 BT_DBG("request for %s", hdev->name);
4589
4590 if (!lmp_le_capable(hdev))
4591 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4592 MGMT_STATUS_NOT_SUPPORTED);
4593
4594 key_count = __le16_to_cpu(cp->key_count);
4595
4596 expected_len = sizeof(*cp) + key_count *
4597 sizeof(struct mgmt_ltk_info);
4598 if (expected_len != len) {
4599 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4600 expected_len, len);
4601 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4602 MGMT_STATUS_INVALID_PARAMS);
4603 }
4604
4605 BT_DBG("%s key_count %u", hdev->name, key_count);
4606
4607 for (i = 0; i < key_count; i++) {
4608 struct mgmt_ltk_info *key = &cp->keys[i];
4609
4610 if (!ltk_is_valid(key))
4611 return cmd_status(sk, hdev->id,
4612 MGMT_OP_LOAD_LONG_TERM_KEYS,
4613 MGMT_STATUS_INVALID_PARAMS);
4614 }
4615
4616 hci_dev_lock(hdev);
4617
4618 hci_smp_ltks_clear(hdev);
4619
4620 for (i = 0; i < key_count; i++) {
4621 struct mgmt_ltk_info *key = &cp->keys[i];
4622 u8 type, addr_type, authenticated;
4623
4624 if (key->addr.type == BDADDR_LE_PUBLIC)
4625 addr_type = ADDR_LE_DEV_PUBLIC;
4626 else
4627 addr_type = ADDR_LE_DEV_RANDOM;
4628
4629 if (key->master)
4630 type = SMP_LTK;
4631 else
4632 type = SMP_LTK_SLAVE;
4633
4634 switch (key->type) {
4635 case MGMT_LTK_UNAUTHENTICATED:
4636 authenticated = 0x00;
4637 break;
4638 case MGMT_LTK_AUTHENTICATED:
4639 authenticated = 0x01;
4640 break;
4641 default:
4642 continue;
4643 }
4644
4645 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4646 authenticated, key->val, key->enc_size, key->ediv,
4647 key->rand);
4648 }
4649
4650 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4651 NULL, 0);
4652
4653 hci_dev_unlock(hdev);
4654
4655 return err;
4656 }
4657
/* Context passed to get_conn_info_complete() when walking the list of
 * pending Get Connection Information commands.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the cache refresh was for */
	bool valid_tx_power;	/* TX power values in conn are usable */
	u8 mgmt_status;		/* status to report to user space */
};
4663
/* Per-pending-command callback invoked via mgmt_pending_foreach() from
 * conn_info_refresh_complete().
 *
 * Replies only to commands that reference the refreshed connection;
 * other pending entries are left untouched.  TX power fields are
 * filled from the connection cache when valid, otherwise set to
 * HCI_TX_POWER_INVALID.  Drops the connection reference taken when the
 * command was queued.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4699
/* HCI request callback for the RSSI/TX power cache refresh issued by
 * get_conn_info().  Identifies the affected connection from the last
 * sent command's handle and completes all matching pending mgmt
 * commands via get_conn_info_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4757
/* Handler for the Get Connection Information mgmt command.
 *
 * Replies immediately with cached RSSI/TX power values when the cache
 * for the connection is still considered fresh. Otherwise an HCI
 * request (Read RSSI plus optional Read TX Power commands) is issued
 * and the reply is deferred: a pending command is queued here and
 * answered later from conn_info_refresh_complete().
 *
 * Returns a negative errno or the result of cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Pre-fill the reply address so error completions echo it back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		/* Defer the reply; conn_info_refresh_complete() answers
		 * this pending command once the HCI request finishes.
		 */
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive while the request is running;
		 * the completion path is expected to drop this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4869
/* Completion callback for the HCI Read Clock request(s) issued by
 * get_clock_info().
 *
 * Resolves the pending Get Clock Information command that matches the
 * connection the last Read Clock command was for (NULL for the local
 * clock only) and completes it with the gathered clock values.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" field means the piconet clock of a
	 * connection was requested - map the handle back to the conn.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and the error status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken by get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4922
4923 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
4924 u16 len)
4925 {
4926 struct mgmt_cp_get_clock_info *cp = data;
4927 struct mgmt_rp_get_clock_info rp;
4928 struct hci_cp_read_clock hci_cp;
4929 struct pending_cmd *cmd;
4930 struct hci_request req;
4931 struct hci_conn *conn;
4932 int err;
4933
4934 BT_DBG("%s", hdev->name);
4935
4936 memset(&rp, 0, sizeof(rp));
4937 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4938 rp.addr.type = cp->addr.type;
4939
4940 if (cp->addr.type != BDADDR_BREDR)
4941 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4942 MGMT_STATUS_INVALID_PARAMS,
4943 &rp, sizeof(rp));
4944
4945 hci_dev_lock(hdev);
4946
4947 if (!hdev_is_powered(hdev)) {
4948 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
4949 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
4950 goto unlock;
4951 }
4952
4953 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4955 &cp->addr.bdaddr);
4956 if (!conn || conn->state != BT_CONNECTED) {
4957 err = cmd_complete(sk, hdev->id,
4958 MGMT_OP_GET_CLOCK_INFO,
4959 MGMT_STATUS_NOT_CONNECTED,
4960 &rp, sizeof(rp));
4961 goto unlock;
4962 }
4963 } else {
4964 conn = NULL;
4965 }
4966
4967 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
4968 if (!cmd) {
4969 err = -ENOMEM;
4970 goto unlock;
4971 }
4972
4973 hci_req_init(&req, hdev);
4974
4975 memset(&hci_cp, 0, sizeof(hci_cp));
4976 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4977
4978 if (conn) {
4979 hci_conn_hold(conn);
4980 cmd->user_data = conn;
4981
4982 hci_cp.handle = cpu_to_le16(conn->handle);
4983 hci_cp.which = 0x01; /* Piconet clock */
4984 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
4985 }
4986
4987 err = hci_req_run(&req, get_clock_info_complete);
4988 if (err < 0)
4989 mgmt_pending_remove(cmd);
4990
4991 unlock:
4992 hci_dev_unlock(hdev);
4993 return err;
4994 }
4995
4996 static void device_added(struct sock *sk, struct hci_dev *hdev,
4997 bdaddr_t *bdaddr, u8 type, u8 action)
4998 {
4999 struct mgmt_ev_device_added ev;
5000
5001 bacpy(&ev.addr.bdaddr, bdaddr);
5002 ev.addr.type = type;
5003 ev.action = action;
5004
5005 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5006 }
5007
5008 static int add_device(struct sock *sk, struct hci_dev *hdev,
5009 void *data, u16 len)
5010 {
5011 struct mgmt_cp_add_device *cp = data;
5012 u8 auto_conn, addr_type;
5013 int err;
5014
5015 BT_DBG("%s", hdev->name);
5016
5017 if (!bdaddr_type_is_le(cp->addr.type) ||
5018 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5019 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5020 MGMT_STATUS_INVALID_PARAMS,
5021 &cp->addr, sizeof(cp->addr));
5022
5023 if (cp->action != 0x00 && cp->action != 0x01)
5024 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5025 MGMT_STATUS_INVALID_PARAMS,
5026 &cp->addr, sizeof(cp->addr));
5027
5028 hci_dev_lock(hdev);
5029
5030 if (cp->addr.type == BDADDR_LE_PUBLIC)
5031 addr_type = ADDR_LE_DEV_PUBLIC;
5032 else
5033 addr_type = ADDR_LE_DEV_RANDOM;
5034
5035 if (cp->action)
5036 auto_conn = HCI_AUTO_CONN_ALWAYS;
5037 else
5038 auto_conn = HCI_AUTO_CONN_REPORT;
5039
5040 /* If the connection parameters don't exist for this device,
5041 * they will be created and configured with defaults.
5042 */
5043 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5044 auto_conn) < 0) {
5045 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5046 MGMT_STATUS_FAILED,
5047 &cp->addr, sizeof(cp->addr));
5048 goto unlock;
5049 }
5050
5051 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5052
5053 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5054 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5055
5056 unlock:
5057 hci_dev_unlock(hdev);
5058 return err;
5059 }
5060
5061 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5062 bdaddr_t *bdaddr, u8 type)
5063 {
5064 struct mgmt_ev_device_removed ev;
5065
5066 bacpy(&ev.addr.bdaddr, bdaddr);
5067 ev.addr.type = type;
5068
5069 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5070 }
5071
/* Handler for the Remove Device mgmt command.
 *
 * Removes the auto-connect entry for a single LE device, or - when
 * BDADDR_ANY with address type 0 is given - clears all enabled
 * connection parameter entries at once.
 *
 * Returns the result of cmd_complete().
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in HCI_AUTO_CONN_DISABLED state are rejected */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Remove any pending auto-connection before freeing the
		 * parameter entry itself.
		 */
		hci_pend_le_conn_del(hdev, &cp->addr.bdaddr, addr_type);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* For BDADDR_ANY only address type 0 is valid */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear_enabled(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5137
5138 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5139 u16 len)
5140 {
5141 struct mgmt_cp_load_conn_param *cp = data;
5142 u16 param_count, expected_len;
5143 int i;
5144
5145 if (!lmp_le_capable(hdev))
5146 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5147 MGMT_STATUS_NOT_SUPPORTED);
5148
5149 param_count = __le16_to_cpu(cp->param_count);
5150
5151 expected_len = sizeof(*cp) + param_count *
5152 sizeof(struct mgmt_conn_param);
5153 if (expected_len != len) {
5154 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5155 expected_len, len);
5156 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5157 MGMT_STATUS_INVALID_PARAMS);
5158 }
5159
5160 BT_DBG("%s param_count %u", hdev->name, param_count);
5161
5162 hci_dev_lock(hdev);
5163
5164 hci_conn_params_clear_disabled(hdev);
5165
5166 for (i = 0; i < param_count; i++) {
5167 struct mgmt_conn_param *param = &cp->params[i];
5168 struct hci_conn_params *hci_param;
5169 u16 min, max, latency, timeout;
5170 u8 addr_type;
5171
5172 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5173 param->addr.type);
5174
5175 if (param->addr.type == BDADDR_LE_PUBLIC) {
5176 addr_type = ADDR_LE_DEV_PUBLIC;
5177 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5178 addr_type = ADDR_LE_DEV_RANDOM;
5179 } else {
5180 BT_ERR("Ignoring invalid connection parameters");
5181 continue;
5182 }
5183
5184 min = le16_to_cpu(param->min_interval);
5185 max = le16_to_cpu(param->max_interval);
5186 latency = le16_to_cpu(param->latency);
5187 timeout = le16_to_cpu(param->timeout);
5188
5189 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5190 min, max, latency, timeout);
5191
5192 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5193 BT_ERR("Ignoring invalid connection parameters");
5194 continue;
5195 }
5196
5197 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5198 addr_type);
5199 if (!hci_param) {
5200 BT_ERR("Failed to add connection parameters");
5201 continue;
5202 }
5203
5204 hci_param->conn_min_interval = min;
5205 hci_param->conn_max_interval = max;
5206 hci_param->conn_latency = latency;
5207 hci_param->supervision_timeout = timeout;
5208 }
5209
5210 hci_dev_unlock(hdev);
5211
5212 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
5213 }
5214
/* Table of mgmt command handlers, indexed by mgmt opcode. Fixed-size
 * commands must carry exactly data_len parameter bytes; variable
 * length commands (var_len) must carry at least data_len bytes. The
 * checks are enforced by mgmt_control() before dispatch.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;		/* payload may be longer than data_len */
	size_t data_len;	/* exact (or minimum, if var_len) payload size */
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
};
5276
/* Entry point for all mgmt commands received on an HCI control socket.
 *
 * Copies the message from the iovec, validates the mgmt header,
 * resolves the controller index, performs opcode/length checks and
 * dispatches to the handler registered in mgmt_handlers[].
 *
 * Returns the consumed message length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, owned exclusively by a user
		 * channel or marked raw-only are not exposed over mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) ||
		    test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry
	 * a controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5370
5371 void mgmt_index_added(struct hci_dev *hdev)
5372 {
5373 if (hdev->dev_type != HCI_BREDR)
5374 return;
5375
5376 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5377 }
5378
5379 void mgmt_index_removed(struct hci_dev *hdev)
5380 {
5381 u8 status = MGMT_STATUS_INVALID_INDEX;
5382
5383 if (hdev->dev_type != HCI_BREDR)
5384 return;
5385
5386 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5387
5388 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5389 }
5390
5391 /* This function requires the caller holds hdev->lock */
5392 static void restart_le_auto_conns(struct hci_dev *hdev)
5393 {
5394 struct hci_conn_params *p;
5395 bool added = false;
5396
5397 list_for_each_entry(p, &hdev->le_conn_params, list) {
5398 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
5399 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
5400 added = true;
5401 }
5402 }
5403
5404 /* Calling hci_pend_le_conn_add will actually already trigger
5405 * background scanning when needed. So no need to trigger it
5406 * just another time.
5407 *
5408 * This check is here to avoid an unneeded restart of the
5409 * passive scanning. Since this is during the controller
5410 * power up phase the duplicate filtering is not an issue.
5411 */
5412 if (added)
5413 return;
5414
5415 hci_update_background_scan(hdev);
5416 }
5417
/* Request callback run when the HCI command batch built by
 * powered_update_hci() has finished. Restarts LE auto-connections,
 * answers pending Set Powered commands and emits New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* match.sk, when set by the foreach callback, carries a socket
	 * reference that must be released here.
	 */
	if (match.sk)
		sock_put(match.sk);
}
5437
/* Build and run the HCI request needed to bring a freshly powered
 * controller in line with the current mgmt settings: SSP mode, LE host
 * support, advertising/scan-response data, link security, BR/EDR scan
 * mode, class of device, local name and EIR data.
 *
 * Returns the result of hci_req_run(); on success powered_complete()
 * runs once all queued commands finish.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync authentication enable with the mgmt link security setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5497
/* Notify mgmt of a controller power state change.
 *
 * When powering on, powered_update_hci() is tried first; if it queued
 * HCI commands (returned 0) the mgmt notification is deferred to
 * powered_complete() and this function returns early. When powering
 * off, all pending commands are failed with "not powered" and a zeroed
 * class of device change may be signalled. In both fall-through cases
 * a New Settings event is emitted.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report the class of device reverting to zero, if it was set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5532
5533 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5534 {
5535 struct pending_cmd *cmd;
5536 u8 status;
5537
5538 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5539 if (!cmd)
5540 return;
5541
5542 if (err == -ERFKILL)
5543 status = MGMT_STATUS_RFKILLED;
5544 else
5545 status = MGMT_STATUS_FAILED;
5546
5547 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5548
5549 mgmt_pending_remove(cmd);
5550 }
5551
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore the controller scan mode / class / advertising data
 * accordingly and emit a New Settings event.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5582
/* Track a controller-driven change of the discoverable state and, when
 * the HCI_DISCOVERABLE flag actually changed, refresh the advertising
 * data and emit a New Settings event.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot persist without discoverable */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5619
5620 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5621 {
5622 bool changed;
5623
5624 /* Nothing needed here if there's a pending command since that
5625 * commands request completion callback takes care of everything
5626 * necessary.
5627 */
5628 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5629 return;
5630
5631 /* Powering off may clear the scan mode - don't let that interfere */
5632 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5633 return;
5634
5635 if (connectable)
5636 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5637 else
5638 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5639
5640 if (changed)
5641 new_settings(hdev, NULL);
5642 }
5643
5644 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5645 {
5646 /* Powering off may stop advertising - don't let that interfere */
5647 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5648 return;
5649
5650 if (advertising)
5651 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5652 else
5653 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5654 }
5655
5656 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5657 {
5658 u8 mgmt_err = mgmt_status(status);
5659
5660 if (scan & SCAN_PAGE)
5661 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5662 cmd_status_rsp, &mgmt_err);
5663
5664 if (scan & SCAN_INQUIRY)
5665 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5666 cmd_status_rsp, &mgmt_err);
5667 }
5668
/* Emit a New Link Key event so userspace can store the BR/EDR link
 * key. store_hint mirrors the persistent flag supplied by the caller.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
5685
5686 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5687 {
5688 if (ltk->authenticated)
5689 return MGMT_LTK_AUTHENTICATED;
5690
5691 return MGMT_LTK_UNAUTHENTICATED;
5692 }
5693
/* Emit a New Long Term Key event so userspace can persist the LE LTK.
 * store_hint is forced to zero for non-identity random addresses since
 * such a key is useless once the peer's address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Flag master (initiator) keys for userspace */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5731
/* Emit a New IRK event carrying the resolvable private address (RPA)
 * that was resolved and the identity resolving key itself.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resovlable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of they system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5761
/* Emit a New CSRK event so userspace can persist the signature
 * resolving key. As with LTKs, store_hint is forced to zero for
 * non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5791
5792 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5793 u8 bdaddr_type, u16 min_interval, u16 max_interval,
5794 u16 latency, u16 timeout)
5795 {
5796 struct mgmt_ev_new_conn_param ev;
5797
5798 memset(&ev, 0, sizeof(ev));
5799 bacpy(&ev.addr.bdaddr, bdaddr);
5800 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5801 ev.store_hint = 0x00;
5802 ev.min_interval = cpu_to_le16(min_interval);
5803 ev.max_interval = cpu_to_le16(max_interval);
5804 ev.latency = cpu_to_le16(latency);
5805 ev.timeout = cpu_to_le16(timeout);
5806
5807 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
5808 }
5809
5810 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5811 u8 data_len)
5812 {
5813 eir[eir_len++] = sizeof(type) + data_len;
5814 eir[eir_len++] = type;
5815 memcpy(&eir[eir_len], data, data_len);
5816 eir_len += data_len;
5817
5818 return eir_len;
5819 }
5820
/* Emit a Device Connected event, appending the remote name and class
 * of device (when available) as EIR-formatted data after the fixed
 * event fields. The 512 byte stack buffer bounds the total event size.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Skip an all-zero class of device */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
5847
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand its socket back to the caller through
 * @data (a struct sock **) with a reference held via sock_hold().
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* Caller is responsible for the matching sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
5865
5866 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5867 {
5868 struct hci_dev *hdev = data;
5869 struct mgmt_cp_unpair_device *cp = cmd->param;
5870 struct mgmt_rp_unpair_device rp;
5871
5872 memset(&rp, 0, sizeof(rp));
5873 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5874 rp.addr.type = cp->addr.type;
5875
5876 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5877
5878 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5879
5880 mgmt_pending_remove(cmd);
5881 }
5882
/* Inform mgmt user space that a remote device was disconnected.
 *
 * If a Set Powered (off) command is pending and this was the last
 * remaining connection, the deferred power-off work is scheduled
 * immediately instead of waiting for its timer.
 *
 * The Device Disconnected event is only emitted for BR/EDR and LE
 * links, and only if the connection had been reported to mgmt
 * (mgmt_connected).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect commands; disconnect_rsp hands
	 * back the command's socket in sk with a reference held.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* sk is forwarded to mgmt_event() — presumably so the event is not
	 * echoed to the socket that issued the Disconnect command; confirm
	 * against mgmt_event()'s skip-socket semantics.
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5924
5925 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5926 u8 link_type, u8 addr_type, u8 status)
5927 {
5928 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5929 struct mgmt_cp_disconnect *cp;
5930 struct mgmt_rp_disconnect rp;
5931 struct pending_cmd *cmd;
5932
5933 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5934 hdev);
5935
5936 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5937 if (!cmd)
5938 return;
5939
5940 cp = cmd->param;
5941
5942 if (bacmp(bdaddr, &cp->addr.bdaddr))
5943 return;
5944
5945 if (cp->addr.type != bdaddr_type)
5946 return;
5947
5948 bacpy(&rp.addr.bdaddr, bdaddr);
5949 rp.addr.type = bdaddr_type;
5950
5951 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5952 mgmt_status(status), &rp, sizeof(rp));
5953
5954 mgmt_pending_remove(cmd);
5955 }
5956
/* Report a failed connection attempt to mgmt user space.
 *
 * As in mgmt_device_disconnected(), a pending Set Powered (off) command
 * gets its deferred power-off work scheduled immediately when this was
 * the last remaining connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* HCI status is translated to a mgmt status code for user space. */
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
5982
5983 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5984 {
5985 struct mgmt_ev_pin_code_request ev;
5986
5987 bacpy(&ev.addr.bdaddr, bdaddr);
5988 ev.addr.type = BDADDR_BREDR;
5989 ev.secure = secure;
5990
5991 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5992 }
5993
5994 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5995 u8 status)
5996 {
5997 struct pending_cmd *cmd;
5998 struct mgmt_rp_pin_code_reply rp;
5999
6000 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6001 if (!cmd)
6002 return;
6003
6004 bacpy(&rp.addr.bdaddr, bdaddr);
6005 rp.addr.type = BDADDR_BREDR;
6006
6007 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6008 mgmt_status(status), &rp, sizeof(rp));
6009
6010 mgmt_pending_remove(cmd);
6011 }
6012
6013 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6014 u8 status)
6015 {
6016 struct pending_cmd *cmd;
6017 struct mgmt_rp_pin_code_reply rp;
6018
6019 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6020 if (!cmd)
6021 return;
6022
6023 bacpy(&rp.addr.bdaddr, bdaddr);
6024 rp.addr.type = BDADDR_BREDR;
6025
6026 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6027 mgmt_status(status), &rp, sizeof(rp));
6028
6029 mgmt_pending_remove(cmd);
6030 }
6031
6032 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6033 u8 link_type, u8 addr_type, u32 value,
6034 u8 confirm_hint)
6035 {
6036 struct mgmt_ev_user_confirm_request ev;
6037
6038 BT_DBG("%s", hdev->name);
6039
6040 bacpy(&ev.addr.bdaddr, bdaddr);
6041 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6042 ev.confirm_hint = confirm_hint;
6043 ev.value = cpu_to_le32(value);
6044
6045 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6046 NULL);
6047 }
6048
6049 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6050 u8 link_type, u8 addr_type)
6051 {
6052 struct mgmt_ev_user_passkey_request ev;
6053
6054 BT_DBG("%s", hdev->name);
6055
6056 bacpy(&ev.addr.bdaddr, bdaddr);
6057 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6058
6059 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6060 NULL);
6061 }
6062
6063 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6064 u8 link_type, u8 addr_type, u8 status,
6065 u8 opcode)
6066 {
6067 struct pending_cmd *cmd;
6068 struct mgmt_rp_user_confirm_reply rp;
6069 int err;
6070
6071 cmd = mgmt_pending_find(opcode, hdev);
6072 if (!cmd)
6073 return -ENOENT;
6074
6075 bacpy(&rp.addr.bdaddr, bdaddr);
6076 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6077 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6078 &rp, sizeof(rp));
6079
6080 mgmt_pending_remove(cmd);
6081
6082 return err;
6083 }
6084
/* Completion handler for a User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

/* Completion handler for a User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

/* Completion handler for a User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

/* Completion handler for a User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6114
6115 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6116 u8 link_type, u8 addr_type, u32 passkey,
6117 u8 entered)
6118 {
6119 struct mgmt_ev_passkey_notify ev;
6120
6121 BT_DBG("%s", hdev->name);
6122
6123 bacpy(&ev.addr.bdaddr, bdaddr);
6124 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6125 ev.passkey = __cpu_to_le32(passkey);
6126 ev.entered = entered;
6127
6128 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6129 }
6130
6131 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6132 u8 addr_type, u8 status)
6133 {
6134 struct mgmt_ev_auth_failed ev;
6135
6136 bacpy(&ev.addr.bdaddr, bdaddr);
6137 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6138 ev.status = mgmt_status(status);
6139
6140 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6141 }
6142
/* Handle completion of an HCI authentication enable change.
 *
 * On failure, all pending Set Link Security commands are failed with
 * the translated status. On success, the HCI_LINK_SECURITY dev flag is
 * synced to the controller's HCI_AUTH state, pending commands are
 * completed with the current settings, and a New Settings event is
 * emitted if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt setting; "changed"
	 * is true only when the flag value actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	/* settings_rsp also records the command socket in match.sk. */
	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6171
6172 static void clear_eir(struct hci_request *req)
6173 {
6174 struct hci_dev *hdev = req->hdev;
6175 struct hci_cp_write_eir cp;
6176
6177 if (!lmp_ext_inq_capable(hdev))
6178 return;
6179
6180 memset(hdev->eir, 0, sizeof(hdev->eir));
6181
6182 memset(&cp, 0, sizeof(cp));
6183
6184 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6185 }
6186
/* Handle completion of an SSP mode change.
 *
 * On failure, the SSP (and dependent High Speed) flags are rolled back
 * and pending Set SSP commands are failed. On success, the flags are
 * synced to the requested state, pending commands are completed, a New
 * Settings event is emitted when something changed, and the EIR data is
 * updated or cleared to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistically set SSP flag; HS depends on
		 * SSP, so it is cleared along with it.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables HS; "changed" must become
		 * true if either flag was actually cleared.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* With SSP on, refresh EIR (and debug-key mode if enabled);
	 * with SSP off, EIR must be cleared.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6239
/* Handle completion of a Secure Connections mode change.
 *
 * On failure, an optimistically set HCI_SC_ENABLED flag is rolled back
 * (HCI_SC_ONLY depends on it and is cleared too) and pending Set Secure
 * Connections commands are failed. On success, the flags are synced to
 * the requested state and a New Settings event is emitted when they
 * actually changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot remain active without SC. */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6276
6277 static void sk_lookup(struct pending_cmd *cmd, void *data)
6278 {
6279 struct cmd_lookup *match = data;
6280
6281 if (match->sk == NULL) {
6282 match->sk = cmd->sk;
6283 sock_hold(match->sk);
6284 }
6285 }
6286
/* Handle completion of a class of device update.
 *
 * The three foreach calls collect the socket of the first pending
 * command (Set Dev Class, Add UUID or Remove UUID) that triggered the
 * update — sk_lookup only keeps the first match, so their order below
 * is significant. On success a Class Of Dev Changed event is emitted.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* Release the reference taken by sk_lookup, if any. */
	if (match.sk)
		sock_put(match.sk);
}
6303
/* Handle completion of an HCI local name change. Nothing is reported
 * when the controller rejected the change (status != 0).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	/* The Local Name Changed event payload has the same layout as the
	 * Set Local Name command parameters, hence the cp struct.
	 */
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change was not initiated via mgmt: keep the stored
		 * name in sync with the controller.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip echoing the event to the socket that issued the command. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6330
/* Handle completion of a Read Local OOB Data request.
 *
 * When Secure Connections is enabled and the controller provided the
 * 256-bit hash/randomizer pair, the extended (192+256 bit) response
 * format is used; otherwise only the 192-bit values are returned.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6377
/* Report a device found during discovery to mgmt user space.
 *
 * The inquiry/advertising EIR data and the scan response data are
 * forwarded concatenated in the event's EIR field; a class of device
 * field is appended when dev_class is given and the EIR data does not
 * already contain one.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Only report devices while a discovery is actually active. */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* When a matching IRK exists, report the address stored with the
	 * IRK instead of the one seen on air (presumably the identity
	 * address — confirm against hci_get_irk()).
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one. */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data follows the (possibly extended) EIR data. */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6425
6426 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6427 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6428 {
6429 struct mgmt_ev_device_found *ev;
6430 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6431 u16 eir_len;
6432
6433 ev = (struct mgmt_ev_device_found *) buf;
6434
6435 memset(buf, 0, sizeof(buf));
6436
6437 bacpy(&ev->addr.bdaddr, bdaddr);
6438 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6439 ev->rssi = rssi;
6440
6441 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6442 name_len);
6443
6444 ev->eir_len = cpu_to_le16(eir_len);
6445
6446 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6447 }
6448
6449 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6450 {
6451 struct mgmt_ev_discovering ev;
6452 struct pending_cmd *cmd;
6453
6454 BT_DBG("%s discovering %u", hdev->name, discovering);
6455
6456 if (discovering)
6457 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6458 else
6459 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6460
6461 if (cmd != NULL) {
6462 u8 type = hdev->discovery.type;
6463
6464 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6465 sizeof(type));
6466 mgmt_pending_remove(cmd);
6467 }
6468
6469 memset(&ev, 0, sizeof(ev));
6470 ev.type = hdev->discovery.type;
6471 ev.discovering = discovering;
6472
6473 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6474 }
6475
6476 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6477 {
6478 BT_DBG("%s status %u", hdev->name, status);
6479
6480 /* Clear the advertising mgmt setting if we failed to re-enable it */
6481 if (status) {
6482 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6483 new_settings(hdev, NULL);
6484 }
6485 }
6486
6487 void mgmt_reenable_advertising(struct hci_dev *hdev)
6488 {
6489 struct hci_request req;
6490
6491 if (hci_conn_num(hdev, LE_LINK) > 0)
6492 return;
6493
6494 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6495 return;
6496
6497 hci_req_init(&req, hdev);
6498 enable_advertising(&req);
6499
6500 /* If this fails we have no option but to let user space know
6501 * that we've disabled advertising.
6502 */
6503 if (hci_req_run(&req, adv_enable_complete) < 0) {
6504 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6505 new_settings(hdev, NULL);
6506 }
6507 }