]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Add support for Set External Configuration management command
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
/* Version and revision of the management interface, reported to user
 * space by read_version().
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	7
39
/* Opcodes of all management commands serviced by this file; the list is
 * reported verbatim to user space by read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
};
96
/* Opcodes of all management events this implementation can emit; also
 * reported to user space by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
};
127
/* 2 second timeout, expressed in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller is "powered" when HCI_UP is set and it is not in the
 * automatic power-off grace period (HCI_AUTO_OFF).
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
132
/* A management command whose reply has not been sent yet; created by
 * mgmt_pending_add() and released by mgmt_pending_free().
 */
struct pending_cmd {
	struct list_head list;	/* node in hdev->mgmt_pending */
	u16 opcode;		/* mgmt command opcode being serviced */
	int index;		/* controller id (hdev->id) */
	void *param;		/* kmalloc'd copy of the command parameters */
	struct sock *sk;	/* requesting socket; reference held */
	void *user_data;	/* opaque key for mgmt_pending_find_data() */
};
141
/* HCI to MGMT error code conversion table, indexed by HCI status code.
 * Looked up via mgmt_status(); any HCI status beyond the end of the
 * table falls back to MGMT_STATUS_FAILED, so entry order must follow
 * the HCI status code numbering exactly.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
206
207 static u8 mgmt_status(u8 hci_status)
208 {
209 if (hci_status < ARRAY_SIZE(mgmt_status_table))
210 return mgmt_status_table[hci_status];
211
212 return MGMT_STATUS_FAILED;
213 }
214
215 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
216 {
217 struct sk_buff *skb;
218 struct mgmt_hdr *hdr;
219 struct mgmt_ev_cmd_status *ev;
220 int err;
221
222 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
223
224 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
225 if (!skb)
226 return -ENOMEM;
227
228 hdr = (void *) skb_put(skb, sizeof(*hdr));
229
230 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
231 hdr->index = cpu_to_le16(index);
232 hdr->len = cpu_to_le16(sizeof(*ev));
233
234 ev = (void *) skb_put(skb, sizeof(*ev));
235 ev->status = status;
236 ev->opcode = cpu_to_le16(cmd);
237
238 err = sock_queue_rcv_skb(sk, skb);
239 if (err < 0)
240 kfree_skb(skb);
241
242 return err;
243 }
244
245 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
246 void *rp, size_t rp_len)
247 {
248 struct sk_buff *skb;
249 struct mgmt_hdr *hdr;
250 struct mgmt_ev_cmd_complete *ev;
251 int err;
252
253 BT_DBG("sock %p", sk);
254
255 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
256 if (!skb)
257 return -ENOMEM;
258
259 hdr = (void *) skb_put(skb, sizeof(*hdr));
260
261 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
262 hdr->index = cpu_to_le16(index);
263 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
264
265 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
266 ev->opcode = cpu_to_le16(cmd);
267 ev->status = status;
268
269 if (rp)
270 memcpy(ev->data, rp, rp_len);
271
272 err = sock_queue_rcv_skb(sk, skb);
273 if (err < 0)
274 kfree_skb(skb);
275
276 return err;
277 }
278
279 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
280 u16 data_len)
281 {
282 struct mgmt_rp_read_version rp;
283
284 BT_DBG("sock %p", sk);
285
286 rp.version = MGMT_VERSION;
287 rp.revision = cpu_to_le16(MGMT_REVISION);
288
289 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
290 sizeof(rp));
291 }
292
293 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
294 u16 data_len)
295 {
296 struct mgmt_rp_read_commands *rp;
297 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
298 const u16 num_events = ARRAY_SIZE(mgmt_events);
299 __le16 *opcode;
300 size_t rp_size;
301 int i, err;
302
303 BT_DBG("sock %p", sk);
304
305 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
306
307 rp = kmalloc(rp_size, GFP_KERNEL);
308 if (!rp)
309 return -ENOMEM;
310
311 rp->num_commands = cpu_to_le16(num_commands);
312 rp->num_events = cpu_to_le16(num_events);
313
314 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
315 put_unaligned_le16(mgmt_commands[i], opcode);
316
317 for (i = 0; i < num_events; i++, opcode++)
318 put_unaligned_le16(mgmt_events[i], opcode);
319
320 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
321 rp_size);
322 kfree(rp);
323
324 return err;
325 }
326
/* Handler for MGMT_OP_READ_INDEX_LIST: report the ids of all configured
 * BR/EDR controllers to the requesting socket.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply. This pass filters less strictly
	 * than the fill pass below (no HCI_SETUP / HCI_USER_CHANNEL /
	 * raw-device checks), so it may over-allocate slightly.
	 * NOTE(review): dev_flags bits are not guarded by
	 * hci_dev_list_lock; if HCI_UNCONFIGURED could be cleared
	 * between the passes, the fill pass might admit a device this
	 * pass did not count - confirm that cannot happen while the
	 * read lock is held.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* One 2-byte index per counted controller */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that should be visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup or claimed by a user
		 * channel socket.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the entries actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
385
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the ids of all
 * BR/EDR controllers that still require configuration.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply. Filters less strictly than the
	 * fill pass below, so the allocation is an upper bound.
	 * NOTE(review): dev_flags bits are not guarded by
	 * hci_dev_list_lock - confirm HCI_UNCONFIGURED cannot be set
	 * between the two passes (see read_index_list()).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* One 2-byte index per counted controller */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that should be visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup or claimed by a user
		 * channel socket.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the entries actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
444
445 static bool is_configured(struct hci_dev *hdev)
446 {
447 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
448 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
449 return false;
450
451 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
452 !bacmp(&hdev->public_addr, BDADDR_ANY))
453 return false;
454
455 return true;
456 }
457
458 static __le32 get_missing_options(struct hci_dev *hdev)
459 {
460 u32 options = 0;
461
462 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
463 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
464 options |= MGMT_OPTION_EXTERNAL_CONFIG;
465
466 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
467 !bacmp(&hdev->public_addr, BDADDR_ANY))
468 options |= MGMT_OPTION_PUBLIC_ADDRESS;
469
470 return cpu_to_le32(options);
471 }
472
473 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
474 {
475 __le32 options = get_missing_options(hdev);
476
477 return cmd_complete(sk, hdev->id, opcode, 0, &options,
478 sizeof(options));
479 }
480
481 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
482 void *data, u16 data_len)
483 {
484 struct mgmt_rp_read_config_info rp;
485 u32 options = 0;
486
487 BT_DBG("sock %p %s", sk, hdev->name);
488
489 hci_dev_lock(hdev);
490
491 memset(&rp, 0, sizeof(rp));
492 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
493
494 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
496
497 if (hdev->set_bdaddr)
498 options |= MGMT_OPTION_PUBLIC_ADDRESS;
499
500 rp.supported_options = cpu_to_le32(options);
501 rp.missing_options = get_missing_options(hdev);
502
503 hci_dev_unlock(hdev);
504
505 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
506 sizeof(rp));
507 }
508
509 static u32 get_supported_settings(struct hci_dev *hdev)
510 {
511 u32 settings = 0;
512
513 settings |= MGMT_SETTING_POWERED;
514 settings |= MGMT_SETTING_PAIRABLE;
515 settings |= MGMT_SETTING_DEBUG_KEYS;
516
517 if (lmp_bredr_capable(hdev)) {
518 settings |= MGMT_SETTING_CONNECTABLE;
519 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
520 settings |= MGMT_SETTING_FAST_CONNECTABLE;
521 settings |= MGMT_SETTING_DISCOVERABLE;
522 settings |= MGMT_SETTING_BREDR;
523 settings |= MGMT_SETTING_LINK_SECURITY;
524
525 if (lmp_ssp_capable(hdev)) {
526 settings |= MGMT_SETTING_SSP;
527 settings |= MGMT_SETTING_HS;
528 }
529
530 if (lmp_sc_capable(hdev) ||
531 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
532 settings |= MGMT_SETTING_SECURE_CONN;
533 }
534
535 if (lmp_le_capable(hdev)) {
536 settings |= MGMT_SETTING_LE;
537 settings |= MGMT_SETTING_ADVERTISING;
538 settings |= MGMT_SETTING_PRIVACY;
539 }
540
541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
542 hdev->set_bdaddr)
543 settings |= MGMT_SETTING_CONFIGURATION;
544
545 return settings;
546 }
547
548 static u32 get_current_settings(struct hci_dev *hdev)
549 {
550 u32 settings = 0;
551
552 if (hdev_is_powered(hdev))
553 settings |= MGMT_SETTING_POWERED;
554
555 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
556 settings |= MGMT_SETTING_CONNECTABLE;
557
558 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
559 settings |= MGMT_SETTING_FAST_CONNECTABLE;
560
561 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
562 settings |= MGMT_SETTING_DISCOVERABLE;
563
564 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
565 settings |= MGMT_SETTING_PAIRABLE;
566
567 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
568 settings |= MGMT_SETTING_BREDR;
569
570 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
571 settings |= MGMT_SETTING_LE;
572
573 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
574 settings |= MGMT_SETTING_LINK_SECURITY;
575
576 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
577 settings |= MGMT_SETTING_SSP;
578
579 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
580 settings |= MGMT_SETTING_HS;
581
582 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
583 settings |= MGMT_SETTING_ADVERTISING;
584
585 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
586 settings |= MGMT_SETTING_SECURE_CONN;
587
588 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
589 settings |= MGMT_SETTING_DEBUG_KEYS;
590
591 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
592 settings |= MGMT_SETTING_PRIVACY;
593
594 return settings;
595 }
596
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (@len bytes remaining); returns the new write position.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for length byte, type byte and one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		/* Only 16-bit UUIDs belong in this field */
		if (uuid->size != 16)
			continue;

		/* The 16-bit value sits in bytes 12-13 of the stored
		 * 128-bit form.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Values below 0x1100 are skipped - presumably to keep
		 * only service-class UUIDs; confirm against the
		 * Bluetooth assigned-numbers list.
		 */
		if (uuid16 < 0x1100)
			continue;

		/* PnP info is excluded here; device-id data is emitted
		 * as a separate EIR_DEVICE_ID field in create_eir().
		 */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			/* Open the field lazily: length byte starts at 1
			 * (the type byte) and grows per appended UUID.
			 */
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			/* Mark the list as incomplete */
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
640
641 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
642 {
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
645
646 if (len < 6)
647 return ptr;
648
649 list_for_each_entry(uuid, &hdev->uuids, list) {
650 if (uuid->size != 32)
651 continue;
652
653 if (!uuids_start) {
654 uuids_start = ptr;
655 uuids_start[0] = 1;
656 uuids_start[1] = EIR_UUID32_ALL;
657 ptr += 2;
658 }
659
660 /* Stop if not enough space to put next UUID */
661 if ((ptr - data) + sizeof(u32) > len) {
662 uuids_start[1] = EIR_UUID32_SOME;
663 break;
664 }
665
666 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
667 ptr += sizeof(u32);
668 uuids_start[0] += sizeof(u32);
669 }
670
671 return ptr;
672 }
673
674 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
675 {
676 u8 *ptr = data, *uuids_start = NULL;
677 struct bt_uuid *uuid;
678
679 if (len < 18)
680 return ptr;
681
682 list_for_each_entry(uuid, &hdev->uuids, list) {
683 if (uuid->size != 128)
684 continue;
685
686 if (!uuids_start) {
687 uuids_start = ptr;
688 uuids_start[0] = 1;
689 uuids_start[1] = EIR_UUID128_ALL;
690 ptr += 2;
691 }
692
693 /* Stop if not enough space to put next UUID */
694 if ((ptr - data) + 16 > len) {
695 uuids_start[1] = EIR_UUID128_SOME;
696 break;
697 }
698
699 memcpy(ptr, uuid->uuid, 16);
700 ptr += 16;
701 uuids_start[0] += 16;
702 }
703
704 return ptr;
705 }
706
707 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
708 {
709 struct pending_cmd *cmd;
710
711 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
712 if (cmd->opcode == opcode)
713 return cmd;
714 }
715
716 return NULL;
717 }
718
719 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
720 struct hci_dev *hdev,
721 const void *data)
722 {
723 struct pending_cmd *cmd;
724
725 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
726 if (cmd->user_data != data)
727 continue;
728 if (cmd->opcode == opcode)
729 return cmd;
730 }
731
732 return NULL;
733 }
734
/* Build the LE scan response payload (currently only the local name)
 * into @ptr and return the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining budget: total AD length minus bytes already
		 * written minus the 2-byte field header.
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* A truncated name must be tagged as shortened */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
760
761 static void update_scan_rsp_data(struct hci_request *req)
762 {
763 struct hci_dev *hdev = req->hdev;
764 struct hci_cp_le_set_scan_rsp_data cp;
765 u8 len;
766
767 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
768 return;
769
770 memset(&cp, 0, sizeof(cp));
771
772 len = create_scan_rsp_data(hdev, cp.data);
773
774 if (hdev->scan_rsp_data_len == len &&
775 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
776 return;
777
778 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
779 hdev->scan_rsp_data_len = len;
780
781 cp.length = len;
782
783 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
784 }
785
786 static u8 get_adv_discov_flags(struct hci_dev *hdev)
787 {
788 struct pending_cmd *cmd;
789
790 /* If there's a pending mgmt command the flags will not yet have
791 * their final values, so check for this first.
792 */
793 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
794 if (cmd) {
795 struct mgmt_mode *cp = cmd->param;
796 if (cp->val == 0x01)
797 return LE_AD_GENERAL;
798 else if (cp->val == 0x02)
799 return LE_AD_LIMITED;
800 } else {
801 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
802 return LE_AD_LIMITED;
803 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
804 return LE_AD_GENERAL;
805 }
806
807 return 0;
808 }
809
810 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
811 {
812 u8 ad_len = 0, flags = 0;
813
814 flags |= get_adv_discov_flags(hdev);
815
816 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
817 flags |= LE_AD_NO_BREDR;
818
819 if (flags) {
820 BT_DBG("adv flags 0x%02x", flags);
821
822 ptr[0] = 2;
823 ptr[1] = EIR_FLAGS;
824 ptr[2] = flags;
825
826 ad_len += 3;
827 ptr += 3;
828 }
829
830 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
831 ptr[0] = 2;
832 ptr[1] = EIR_TX_POWER;
833 ptr[2] = (u8) hdev->adv_tx_power;
834
835 ad_len += 3;
836 ptr += 3;
837 }
838
839 return ad_len;
840 }
841
842 static void update_adv_data(struct hci_request *req)
843 {
844 struct hci_dev *hdev = req->hdev;
845 struct hci_cp_le_set_adv_data cp;
846 u8 len;
847
848 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
849 return;
850
851 memset(&cp, 0, sizeof(cp));
852
853 len = create_adv_data(hdev, cp.data);
854
855 if (hdev->adv_data_len == len &&
856 memcmp(cp.data, hdev->adv_data, len) == 0)
857 return;
858
859 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
860 hdev->adv_data_len = len;
861
862 cp.length = len;
863
864 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
865 }
866
/* Build the extended inquiry response into @data: local name, inquiry
 * TX power, device id and the three UUID lists, in that order.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		/* Names over 48 bytes are truncated and tagged as
		 * shortened. NOTE(review): 48 is hard-coded - presumably
		 * to leave room for the fields below; confirm against
		 * the HCI_MAX_EIR_LENGTH budget.
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* Inquiry TX power, only when the controller reported one */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID: source, vendor, product and version as four
	 * little-endian 16-bit values (field length 9 plus length byte).
	 */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Whatever space remains goes to the UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
914
915 static void update_eir(struct hci_request *req)
916 {
917 struct hci_dev *hdev = req->hdev;
918 struct hci_cp_write_eir cp;
919
920 if (!hdev_is_powered(hdev))
921 return;
922
923 if (!lmp_ext_inq_capable(hdev))
924 return;
925
926 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
927 return;
928
929 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
930 return;
931
932 memset(&cp, 0, sizeof(cp));
933
934 create_eir(hdev, cp.data);
935
936 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
937 return;
938
939 memcpy(hdev->eir, cp.data, sizeof(cp.data));
940
941 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
942 }
943
944 static u8 get_service_classes(struct hci_dev *hdev)
945 {
946 struct bt_uuid *uuid;
947 u8 val = 0;
948
949 list_for_each_entry(uuid, &hdev->uuids, list)
950 val |= uuid->svc_hint;
951
952 return val;
953 }
954
955 static void update_class(struct hci_request *req)
956 {
957 struct hci_dev *hdev = req->hdev;
958 u8 cod[3];
959
960 BT_DBG("%s", hdev->name);
961
962 if (!hdev_is_powered(hdev))
963 return;
964
965 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
966 return;
967
968 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
969 return;
970
971 cod[0] = hdev->minor_class;
972 cod[1] = hdev->major_class;
973 cod[2] = get_service_classes(hdev);
974
975 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
976 cod[1] |= 0x20;
977
978 if (memcmp(cod, hdev->dev_class, 3) == 0)
979 return;
980
981 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
982 }
983
984 static bool get_connectable(struct hci_dev *hdev)
985 {
986 struct pending_cmd *cmd;
987
988 /* If there's a pending mgmt command the flag will not yet have
989 * it's final value, so check for this first.
990 */
991 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
992 if (cmd) {
993 struct mgmt_mode *cp = cmd->param;
994 return cp->val;
995 }
996
997 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
998 }
999
/* Queue the HCI commands that (re)enable LE advertising with parameters
 * derived from the current connectable state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800 in the spec's 0.625 ms units = 1.28 s interval */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	/* Connectable undirected vs. non-connectable advertising */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1034
1035 static void disable_advertising(struct hci_request *req)
1036 {
1037 u8 enable = 0x00;
1038
1039 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1040 }
1041
1042 static void service_cache_off(struct work_struct *work)
1043 {
1044 struct hci_dev *hdev = container_of(work, struct hci_dev,
1045 service_cache.work);
1046 struct hci_request req;
1047
1048 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1049 return;
1050
1051 hci_req_init(&req, hdev);
1052
1053 hci_dev_lock(hdev);
1054
1055 update_eir(&req);
1056 update_class(&req);
1057
1058 hci_dev_unlock(hdev);
1059
1060 hci_req_run(&req, NULL);
1061 }
1062
/* Delayed work that runs when the resolvable private address lifetime
 * ends; forces a fresh RPA when advertising is active.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	/* Mark the current RPA stale first; the next random-address
	 * update will then generate a new one.
	 */
	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing more to do unless advertising is on, and leave the
	 * address alone while LE connections exist.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
1088
1089 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1090 {
1091 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1092 return;
1093
1094 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1095 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1096
1097 /* Non-mgmt controlled devices get this bit set
1098 * implicitly so that pairing works for them, however
1099 * for mgmt we require user-space to explicitly enable
1100 * it
1101 */
1102 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1103 }
1104
1105 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1106 void *data, u16 data_len)
1107 {
1108 struct mgmt_rp_read_info rp;
1109
1110 BT_DBG("sock %p %s", sk, hdev->name);
1111
1112 hci_dev_lock(hdev);
1113
1114 memset(&rp, 0, sizeof(rp));
1115
1116 bacpy(&rp.bdaddr, &hdev->bdaddr);
1117
1118 rp.version = hdev->hci_ver;
1119 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1120
1121 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1122 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1123
1124 memcpy(rp.dev_class, hdev->dev_class, 3);
1125
1126 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1127 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1128
1129 hci_dev_unlock(hdev);
1130
1131 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1132 sizeof(rp));
1133 }
1134
1135 static void mgmt_pending_free(struct pending_cmd *cmd)
1136 {
1137 sock_put(cmd->sk);
1138 kfree(cmd->param);
1139 kfree(cmd);
1140 }
1141
1142 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1143 struct hci_dev *hdev, void *data,
1144 u16 len)
1145 {
1146 struct pending_cmd *cmd;
1147
1148 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1149 if (!cmd)
1150 return NULL;
1151
1152 cmd->opcode = opcode;
1153 cmd->index = hdev->id;
1154
1155 cmd->param = kmalloc(len, GFP_KERNEL);
1156 if (!cmd->param) {
1157 kfree(cmd);
1158 return NULL;
1159 }
1160
1161 if (data)
1162 memcpy(cmd->param, data, len);
1163
1164 cmd->sk = sk;
1165 sock_hold(sk);
1166
1167 list_add(&cmd->list, &hdev->mgmt_pending);
1168
1169 return cmd;
1170 }
1171
1172 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1173 void (*cb)(struct pending_cmd *cmd,
1174 void *data),
1175 void *data)
1176 {
1177 struct pending_cmd *cmd, *tmp;
1178
1179 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1180 if (opcode > 0 && cmd->opcode != opcode)
1181 continue;
1182
1183 cb(cmd, data);
1184 }
1185 }
1186
/* Unlink a pending command from its controller's list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1192
1193 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1194 {
1195 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1196
1197 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1198 sizeof(settings));
1199 }
1200
/* Request-complete callback for clean_up_hci_state(). If no
 * connections remain once the cleanup commands have finished, skip
 * the power-off grace period and schedule the power-off work
 * immediately.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1210
/* Append to req whatever HCI commands are needed to abort the
 * currently running discovery procedure, based on the discovery
 * state machine.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is active;
		 * cancel whichever one is running.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* A remote name request is in flight; cancel it for the
		 * entry whose name resolution is pending. If no such
		 * entry exists there is nothing to abort.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1247
/* Build and run an HCI request that brings the controller to a quiet
 * state before powering off: disable page/inquiry scan and
 * advertising, abort discovery, and disconnect, cancel or reject
 * every connection depending on its state. Returns the hci_req_run()
 * result (-ENODATA when nothing needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get a clean disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1300
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * Powering on is deferred to the power_on work item; powering off
 * first cleans up HCI state (disconnects, scan/advertising off) and
 * then schedules the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power toggle may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the device was still in its auto-off grace period, take
	 * ownership of it: cancel the pending auto power-off and, when
	 * powering on, just report the (already powered) state.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches reality: just respond */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1366
/* Broadcast a management event to all control sockets except
 * skip_sk. A NULL hdev sends the event with MGMT_INDEX_NONE; a NULL
 * data pointer sends a payload-less event.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Fill in the mgmt header followed by the optional payload */
	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1396
1397 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1398 {
1399 __le32 ev;
1400
1401 ev = cpu_to_le32(get_current_settings(hdev));
1402
1403 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1404 }
1405
/* Shared context for mgmt_pending_foreach() callbacks: remembers the
 * first responded-to socket (so a follow-up event can skip it) and
 * carries the status some callbacks forward.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket a response was sent to */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1411
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, remember the first responder socket in
 * the cmd_lookup context (taking a reference), and free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Safe inside mgmt_pending_foreach thanks to the safe iterator */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1427
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by data and remove it.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1435
1436 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1437 {
1438 if (!lmp_bredr_capable(hdev))
1439 return MGMT_STATUS_NOT_SUPPORTED;
1440 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1441 return MGMT_STATUS_REJECTED;
1442 else
1443 return MGMT_STATUS_SUCCESS;
1444 }
1445
1446 static u8 mgmt_le_support(struct hci_dev *hdev)
1447 {
1448 if (!lmp_le_capable(hdev))
1449 return MGMT_STATUS_NOT_SUPPORTED;
1450 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1451 return MGMT_STATUS_REJECTED;
1452 else
1453 return MGMT_STATUS_SUCCESS;
1454 }
1455
/* HCI request completion handler for MGMT_OP_SET_DISCOVERABLE:
 * update the HCI_DISCOVERABLE flag, arm the discoverable timeout if
 * one was requested, respond to the pending command and notify other
 * sockets of the settings change.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited-discoverable flag was set optimistically in
		 * set_discoverable(); roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1512
/* Handler for MGMT_OP_SET_DISCOVERABLE. val may be 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). Builds an HCI request updating the IAC list,
 * scan enable and advertising data as needed; the flag updates and
 * timeout arming are finished in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the device is powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on a connectable device */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1677
/* Queue page-scan parameter updates implementing fast connectable
 * mode (interlaced scan with a short interval) or restoring the
 * defaults. Commands are only added when the stored parameters
 * actually differ, so this is a no-op if nothing changes.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require at least HCI 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1712
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE: sync
 * the HCI_CONNECTABLE flag with the requested value, respond to the
 * pending command and broadcast new settings when the flag changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1750
/* Apply a connectable setting purely in the flag state (no HCI
 * traffic), used when the device is powered off or no commands
 * needed queuing. Disabling connectable also clears discoverable,
 * since discoverable depends on it.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		changed = true;

	if (val) {
		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	} else {
		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed)
		return new_settings(hdev, sk);

	return 0;
}
1776
/* Handler for MGMT_OP_SET_CONNECTABLE: toggle whether the controller
 * accepts connections. On BR/EDR this drives page scan; on LE-only
 * controllers it is reflected in the advertising data. The flag
 * update completes in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* A running discoverable timeout is pointless once
			 * scanning stops entirely.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable mode,
	 * but only while no LE connections exist.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: no HCI work was needed, fall back to a pure
		 * settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1871
1872 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1873 u16 len)
1874 {
1875 struct mgmt_mode *cp = data;
1876 bool changed;
1877 int err;
1878
1879 BT_DBG("request for %s", hdev->name);
1880
1881 if (cp->val != 0x00 && cp->val != 0x01)
1882 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1883 MGMT_STATUS_INVALID_PARAMS);
1884
1885 hci_dev_lock(hdev);
1886
1887 if (cp->val)
1888 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1889 else
1890 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1891
1892 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1893 if (err < 0)
1894 goto unlock;
1895
1896 if (changed)
1897 err = new_settings(hdev, sk);
1898
1899 unlock:
1900 hci_dev_unlock(hdev);
1901 return err;
1902 }
1903
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR
 * link-level security (authentication). When powered, this issues
 * HCI_OP_WRITE_AUTH_ENABLE and the response is sent from the command
 * complete path; when powered off only the flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just respond */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1973
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple
 * Pairing. Disabling SSP implicitly turns off High Speed (and SSP
 * debug-key mode if it was on). When powered, the request completes
 * via the HCI_OP_WRITE_SSP_MODE command-complete path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP also disables HS; report "changed"
			 * if either flag flipped.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns SSP debug mode off (cp->val is 0x00
	 * here, which is the value the debug-mode command expects).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2055
/* Handler for MGMT_OP_SET_HS: toggle the High Speed flag. HS
 * requires SSP to be enabled; disabling HS while powered is
 * rejected. Pure flag update - no HCI traffic is generated.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2106
/* HCI request completion handler for MGMT_OP_SET_LE: answer all
 * pending SET_LE commands (error status or current settings), emit
 * new settings once, and refresh advertising/scan-response data when
 * LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk is the first responder; skip it for the broadcast */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2144
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support
 * via HCI_OP_WRITE_LE_HOST_SUPPORTED. On LE-only controllers LE
 * cannot be toggled; when powered off or when the host-LE state
 * already matches, only the flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also implicitly stops LE advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must stop before LE support is disabled */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2233
2234 /* This is a helper function to test for pending mgmt commands that can
2235 * cause CoD or EIR HCI commands. We can only allow one such pending
2236 * mgmt command at a time since otherwise we cannot easily track what
2237 * the current values are, will be, and based on that calculate if a new
2238 * HCI command needs to be sent and if yes with what value.
2239 */
2240 static bool pending_eir_or_class(struct hci_dev *hdev)
2241 {
2242 struct pending_cmd *cmd;
2243
2244 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2245 switch (cmd->opcode) {
2246 case MGMT_OP_ADD_UUID:
2247 case MGMT_OP_REMOVE_UUID:
2248 case MGMT_OP_SET_DEV_CLASS:
2249 case MGMT_OP_SET_POWERED:
2250 return true;
2251 }
2252 }
2253
2254 return false;
2255 }
2256
/* Bluetooth Base UUID (xxxxxxxx-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are aliases of it
 * differing only in the last four bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2261
2262 static u8 get_uuid_size(const u8 *uuid)
2263 {
2264 u32 val;
2265
2266 if (memcmp(uuid, bluetooth_base_uuid, 12))
2267 return 128;
2268
2269 val = get_unaligned_le32(&uuid[12]);
2270 if (val > 0xffff)
2271 return 32;
2272
2273 return 16;
2274 }
2275
/* Common completion for class/EIR-affecting commands (Add UUID,
 * Remove UUID, Set Device Class): respond to the pending command
 * with the (possibly updated) class of device and remove it.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2294
/* HCI request completion for add_uuid(): finish the pending
 * MGMT_OP_ADD_UUID command.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2301
/* Handler for MGMT_OP_ADD_UUID: record a new service UUID and
 * refresh the class of device and EIR data. If no HCI commands were
 * needed (-ENODATA), the response is sent immediately; otherwise it
 * comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work needed: respond right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2359
2360 static bool enable_service_cache(struct hci_dev *hdev)
2361 {
2362 if (!hdev_is_powered(hdev))
2363 return false;
2364
2365 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2366 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2367 CACHE_TIMEOUT);
2368 return true;
2369 }
2370
2371 return false;
2372 }
2373
/* HCI request completion for remove_uuid(): finish the pending
 * MGMT_OP_REMOVE_UUID command.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2380
/* Handle MGMT_OP_REMOVE_UUID: remove one stored service UUID, or all of
 * them when the all-zero wildcard UUID is supplied, then refresh the
 * class of device and EIR data. Returns 0 on success or a negative
 * errno; the mgmt reply may be deferred to remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID acts as a wildcard that clears every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache could be (re)armed the actual
		 * class/EIR flush is deferred to the cache timer, so
		 * complete the command right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send to the controller, reply now */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2458
/* Request-complete callback for the Set Device Class HCI request:
 * forward the HCI status to the shared class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2465
/* Handle MGMT_OP_SET_DEV_CLASS: update the major/minor class of device.
 * The lower two bits of minor and the upper three bits of major are
 * reserved and must be zero. When powered, the class (and possibly the
 * EIR data) is written to the controller; when powered off only the
 * stored values change and the command completes immediately.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If the service cache was armed, cancel its flush work and fold
	 * the EIR update into this request instead. The lock is dropped
	 * around the synchronous cancel; NOTE(review): presumably the
	 * work item itself takes the hdev lock — the unlock/relock dance
	 * suggests so; confirm against service_cache's work function.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send to the controller, reply now */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2536
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace all stored BR/EDR link keys
 * with the supplied list and update the keep-debug-keys policy. Debug
 * combination keys in the list are skipped so that using them always
 * requires a fresh pairing.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so the expected_len computation below cannot
	 * overflow the u16 message length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the advertised key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2618
2619 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2620 u8 addr_type, struct sock *skip_sk)
2621 {
2622 struct mgmt_ev_device_unpaired ev;
2623
2624 bacpy(&ev.addr.bdaddr, bdaddr);
2625 ev.addr.type = addr_type;
2626
2627 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2628 skip_sk);
2629 }
2630
2631 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2632 u16 len)
2633 {
2634 struct mgmt_cp_unpair_device *cp = data;
2635 struct mgmt_rp_unpair_device rp;
2636 struct hci_cp_disconnect dc;
2637 struct pending_cmd *cmd;
2638 struct hci_conn *conn;
2639 int err;
2640
2641 memset(&rp, 0, sizeof(rp));
2642 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2643 rp.addr.type = cp->addr.type;
2644
2645 if (!bdaddr_type_is_valid(cp->addr.type))
2646 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2647 MGMT_STATUS_INVALID_PARAMS,
2648 &rp, sizeof(rp));
2649
2650 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2651 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2652 MGMT_STATUS_INVALID_PARAMS,
2653 &rp, sizeof(rp));
2654
2655 hci_dev_lock(hdev);
2656
2657 if (!hdev_is_powered(hdev)) {
2658 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2659 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2660 goto unlock;
2661 }
2662
2663 if (cp->addr.type == BDADDR_BREDR) {
2664 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2665 } else {
2666 u8 addr_type;
2667
2668 if (cp->addr.type == BDADDR_LE_PUBLIC)
2669 addr_type = ADDR_LE_DEV_PUBLIC;
2670 else
2671 addr_type = ADDR_LE_DEV_RANDOM;
2672
2673 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2674
2675 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2676
2677 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2678 }
2679
2680 if (err < 0) {
2681 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2682 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2683 goto unlock;
2684 }
2685
2686 if (cp->disconnect) {
2687 if (cp->addr.type == BDADDR_BREDR)
2688 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2689 &cp->addr.bdaddr);
2690 else
2691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2692 &cp->addr.bdaddr);
2693 } else {
2694 conn = NULL;
2695 }
2696
2697 if (!conn) {
2698 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2699 &rp, sizeof(rp));
2700 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2701 goto unlock;
2702 }
2703
2704 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2705 sizeof(*cp));
2706 if (!cmd) {
2707 err = -ENOMEM;
2708 goto unlock;
2709 }
2710
2711 dc.handle = cpu_to_le16(conn->handle);
2712 dc.reason = 0x13; /* Remote User Terminated Connection */
2713 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2714 if (err < 0)
2715 mgmt_pending_remove(cmd);
2716
2717 unlock:
2718 hci_dev_unlock(hdev);
2719 return err;
2720 }
2721
/* Handle MGMT_OP_DISCONNECT: terminate the ACL or LE connection to the
 * given address with reason "Remote User Terminated Connection". The
 * mgmt reply is sent once the disconnect completes via the pending
 * command tracked here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2786
2787 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2788 {
2789 switch (link_type) {
2790 case LE_LINK:
2791 switch (addr_type) {
2792 case ADDR_LE_DEV_PUBLIC:
2793 return BDADDR_LE_PUBLIC;
2794
2795 default:
2796 /* Fallback to LE Random address type */
2797 return BDADDR_LE_RANDOM;
2798 }
2799
2800 default:
2801 /* Fallback to BR/EDR type */
2802 return BDADDR_BREDR;
2803 }
2804 }
2805
2806 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2807 u16 data_len)
2808 {
2809 struct mgmt_rp_get_connections *rp;
2810 struct hci_conn *c;
2811 size_t rp_len;
2812 int err;
2813 u16 i;
2814
2815 BT_DBG("");
2816
2817 hci_dev_lock(hdev);
2818
2819 if (!hdev_is_powered(hdev)) {
2820 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2821 MGMT_STATUS_NOT_POWERED);
2822 goto unlock;
2823 }
2824
2825 i = 0;
2826 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2827 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2828 i++;
2829 }
2830
2831 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2832 rp = kmalloc(rp_len, GFP_KERNEL);
2833 if (!rp) {
2834 err = -ENOMEM;
2835 goto unlock;
2836 }
2837
2838 i = 0;
2839 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2840 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2841 continue;
2842 bacpy(&rp->addr[i].bdaddr, &c->dst);
2843 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2844 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2845 continue;
2846 i++;
2847 }
2848
2849 rp->conn_count = cpu_to_le16(i);
2850
2851 /* Recalculate length in case of filtered SCO connections, etc */
2852 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2853
2854 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2855 rp_len);
2856
2857 kfree(rp);
2858
2859 unlock:
2860 hci_dev_unlock(hdev);
2861 return err;
2862 }
2863
/* Send an HCI PIN Code Negative Reply for the given address, tracking
 * it as a pending mgmt command so the HCI event handler can complete
 * it. Returns 0 on success or a negative errno.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2882
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing. If the link demands a
 * 16-byte PIN (pending security level HIGH) and a shorter one was
 * given, a negative reply is sent to the controller instead and the
 * command fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2942
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability to be used
 * for future pairings, after validating that it is a defined value
 * (at most SMP_IO_KEYBOARD_DISPLAY).
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}
2966
2967 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2968 {
2969 struct hci_dev *hdev = conn->hdev;
2970 struct pending_cmd *cmd;
2971
2972 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2973 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2974 continue;
2975
2976 if (cmd->user_data != conn)
2977 continue;
2978
2979 return cmd;
2980 }
2981
2982 return NULL;
2983 }
2984
/* Finish a Pair Device command: send the command-complete with the
 * given status, detach all pairing callbacks from the connection, drop
 * the connection reference held for the pairing and free the pending
 * command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3005
/* Called by SMP when an LE pairing attempt finishes: complete the
 * matching Pair Device command (if any) with success or failure.
 */
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd)
		pairing_complete(cmd, status);
}
3015
/* Connection callback for BR/EDR pairing: any connect/security/disconn
 * event ends the pairing with the mapped HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}
3028
/* Connection callback for LE pairing. A successful event alone is not
 * proof that pairing finished — SMP signals that separately via
 * mgmt_smp_complete() — so only failures complete the command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd)
		BT_DBG("Unable to find a pending command");
	else
		pairing_complete(cmd, mgmt_status(status));
}
3044
/* Handle MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with the given
 * address. For BR/EDR an ACL connection is created; for LE, connection
 * parameters are pre-registered and an LE connection is created. The
 * mgmt reply is deferred to the pairing callbacks unless the link is
 * already connected and secure.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* An existing connect_cfm_cb means some other flow already owns
	 * this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3162
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending Pair Device
 * command for the given address, completing it with Cancelled status.
 * Fails with Invalid Params if no pairing is pending or the address
 * does not match the one being paired.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3204
/* Common helper for all user pairing responses (PIN, confirm and
 * passkey, positive and negative variants). For LE addresses the reply
 * is routed to SMP; for BR/EDR the corresponding HCI command (hci_op)
 * is sent, with the passkey included only for
 * HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE responses are handled by SMP and answered synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3272
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common pairing-response
 * helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3284
/* Handle MGMT_OP_USER_CONFIRM_REPLY via the common pairing-response
 * helper, after checking the exact expected payload size.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3300
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing-response
 * helper (no passkey involved).
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3312
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common pairing-response
 * helper, forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3324
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing-response
 * helper (no passkey involved).
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3336
/* Queue an HCI Write Local Name command carrying the current dev_name
 * onto the given request.
 */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3346
/* Request-complete callback for Set Local Name: answer the pending mgmt
 * command with the name parameters on success, or with the mapped HCI
 * error status on failure.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3374
/* Handle MGMT_OP_SET_LOCAL_NAME: update the friendly and short device
 * names. When powered off only the stored names change and a Local Name
 * Changed event is emitted; otherwise the name is written to the
 * controller and EIR/scan response data are refreshed.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3443
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for the local
 * OOB pairing data. The extended HCI variant is used when Secure
 * Connections is enabled. The mgmt reply is sent from the HCI event
 * handler via the pending command tracked here.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB data read may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3491
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * Two fixed payload sizes are accepted — the legacy form with a single
 * hash/randomizer pair and the extended form carrying both 192-bit and
 * 256-bit values; any other length is rejected as invalid.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3539
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data
 * for the given address; unknown addresses yield Invalid Params.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
			   status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3563
/* Fail the pending Start Discovery command with the given HCI status
 * and reset the discovery state machine. Returns -ENOENT when no such
 * command is pending.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3584
/* Callback run when the Start Discovery HCI request finishes. On
 * failure the discovery state is reset and the pending command fails.
 * On success the state moves to FINDING and, for LE or interleaved
 * discovery, delayed work is queued to disable the LE scan once the
 * discovery timeout elapses (BR/EDR inquiry stops on its own).
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* timeout stays 0 for BR/EDR-only (and invalid) discovery types */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3623
/* Start Discovery management command handler.
 *
 * Validates the requested discovery type, builds the matching HCI
 * request (inquiry for BR/EDR, active LE scan for LE/interleaved) and
 * submits it; start_discovery_complete() finishes the operation.
 *
 * Returns a negative errno on internal failure, otherwise the result
 * of sending the mgmt status response.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and discovery cannot run concurrently */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry already in progress means we are busy */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally requires BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active scanning is rejected while advertising is on */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3772
3773 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3774 {
3775 struct pending_cmd *cmd;
3776 int err;
3777
3778 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3779 if (!cmd)
3780 return -ENOENT;
3781
3782 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3783 &hdev->discovery.type, sizeof(hdev->discovery.type));
3784 mgmt_pending_remove(cmd);
3785
3786 return err;
3787 }
3788
3789 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3790 {
3791 BT_DBG("status %d", status);
3792
3793 hci_dev_lock(hdev);
3794
3795 if (status) {
3796 mgmt_stop_discovery_failed(hdev, status);
3797 goto unlock;
3798 }
3799
3800 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3801
3802 unlock:
3803 hci_dev_unlock(hdev);
3804 }
3805
/* Stop Discovery management command handler.
 *
 * Rejects the request when no discovery is active or when the given
 * type does not match the one currently running; otherwise queues the
 * HCI commands needed to stop discovery. stop_discovery_complete()
 * finishes the asynchronous case.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3861
3862 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3863 u16 len)
3864 {
3865 struct mgmt_cp_confirm_name *cp = data;
3866 struct inquiry_entry *e;
3867 int err;
3868
3869 BT_DBG("%s", hdev->name);
3870
3871 hci_dev_lock(hdev);
3872
3873 if (!hci_discovery_active(hdev)) {
3874 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3875 MGMT_STATUS_FAILED, &cp->addr,
3876 sizeof(cp->addr));
3877 goto failed;
3878 }
3879
3880 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3881 if (!e) {
3882 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3883 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3884 sizeof(cp->addr));
3885 goto failed;
3886 }
3887
3888 if (cp->name_known) {
3889 e->name_state = NAME_KNOWN;
3890 list_del(&e->list);
3891 } else {
3892 e->name_state = NAME_NEEDED;
3893 hci_inquiry_cache_update_resolve(hdev, e);
3894 }
3895
3896 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3897 sizeof(cp->addr));
3898
3899 failed:
3900 hci_dev_unlock(hdev);
3901 return err;
3902 }
3903
3904 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3905 u16 len)
3906 {
3907 struct mgmt_cp_block_device *cp = data;
3908 u8 status;
3909 int err;
3910
3911 BT_DBG("%s", hdev->name);
3912
3913 if (!bdaddr_type_is_valid(cp->addr.type))
3914 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3915 MGMT_STATUS_INVALID_PARAMS,
3916 &cp->addr, sizeof(cp->addr));
3917
3918 hci_dev_lock(hdev);
3919
3920 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3921 if (err < 0) {
3922 status = MGMT_STATUS_FAILED;
3923 goto done;
3924 }
3925
3926 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3927 sk);
3928 status = MGMT_STATUS_SUCCESS;
3929
3930 done:
3931 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3932 &cp->addr, sizeof(cp->addr));
3933
3934 hci_dev_unlock(hdev);
3935
3936 return err;
3937 }
3938
3939 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3940 u16 len)
3941 {
3942 struct mgmt_cp_unblock_device *cp = data;
3943 u8 status;
3944 int err;
3945
3946 BT_DBG("%s", hdev->name);
3947
3948 if (!bdaddr_type_is_valid(cp->addr.type))
3949 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3950 MGMT_STATUS_INVALID_PARAMS,
3951 &cp->addr, sizeof(cp->addr));
3952
3953 hci_dev_lock(hdev);
3954
3955 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3956 if (err < 0) {
3957 status = MGMT_STATUS_INVALID_PARAMS;
3958 goto done;
3959 }
3960
3961 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3962 sk);
3963 status = MGMT_STATUS_SUCCESS;
3964
3965 done:
3966 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3967 &cp->addr, sizeof(cp->addr));
3968
3969 hci_dev_unlock(hdev);
3970
3971 return err;
3972 }
3973
3974 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3975 u16 len)
3976 {
3977 struct mgmt_cp_set_device_id *cp = data;
3978 struct hci_request req;
3979 int err;
3980 __u16 source;
3981
3982 BT_DBG("%s", hdev->name);
3983
3984 source = __le16_to_cpu(cp->source);
3985
3986 if (source > 0x0002)
3987 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3988 MGMT_STATUS_INVALID_PARAMS);
3989
3990 hci_dev_lock(hdev);
3991
3992 hdev->devid_source = source;
3993 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3994 hdev->devid_product = __le16_to_cpu(cp->product);
3995 hdev->devid_version = __le16_to_cpu(cp->version);
3996
3997 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3998
3999 hci_req_init(&req, hdev);
4000 update_eir(&req);
4001 hci_req_run(&req, NULL);
4002
4003 hci_dev_unlock(hdev);
4004
4005 return err;
4006 }
4007
4008 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4009 {
4010 struct cmd_lookup match = { NULL, hdev };
4011
4012 if (status) {
4013 u8 mgmt_err = mgmt_status(status);
4014
4015 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4016 cmd_status_rsp, &mgmt_err);
4017 return;
4018 }
4019
4020 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4021 &match);
4022
4023 new_settings(hdev, match.sk);
4024
4025 if (match.sk)
4026 sock_put(match.sk);
4027 }
4028
/* Set Advertising management command handler.
 *
 * When no HCI traffic is needed (powered off, value unchanged, or LE
 * connections exist) the flag is toggled directly and a response sent
 * immediately; otherwise the advertising enable/disable HCI request is
 * queued and set_advertising_complete() finishes the operation.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject when another advertising or LE toggle is in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4106
4107 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4108 void *data, u16 len)
4109 {
4110 struct mgmt_cp_set_static_address *cp = data;
4111 int err;
4112
4113 BT_DBG("%s", hdev->name);
4114
4115 if (!lmp_le_capable(hdev))
4116 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4117 MGMT_STATUS_NOT_SUPPORTED);
4118
4119 if (hdev_is_powered(hdev))
4120 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4121 MGMT_STATUS_REJECTED);
4122
4123 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4124 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4125 return cmd_status(sk, hdev->id,
4126 MGMT_OP_SET_STATIC_ADDRESS,
4127 MGMT_STATUS_INVALID_PARAMS);
4128
4129 /* Two most significant bits shall be set */
4130 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4131 return cmd_status(sk, hdev->id,
4132 MGMT_OP_SET_STATIC_ADDRESS,
4133 MGMT_STATUS_INVALID_PARAMS);
4134 }
4135
4136 hci_dev_lock(hdev);
4137
4138 bacpy(&hdev->static_addr, &cp->bdaddr);
4139
4140 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4141
4142 hci_dev_unlock(hdev);
4143
4144 return err;
4145 }
4146
4147 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4148 void *data, u16 len)
4149 {
4150 struct mgmt_cp_set_scan_params *cp = data;
4151 __u16 interval, window;
4152 int err;
4153
4154 BT_DBG("%s", hdev->name);
4155
4156 if (!lmp_le_capable(hdev))
4157 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4158 MGMT_STATUS_NOT_SUPPORTED);
4159
4160 interval = __le16_to_cpu(cp->interval);
4161
4162 if (interval < 0x0004 || interval > 0x4000)
4163 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4164 MGMT_STATUS_INVALID_PARAMS);
4165
4166 window = __le16_to_cpu(cp->window);
4167
4168 if (window < 0x0004 || window > 0x4000)
4169 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4170 MGMT_STATUS_INVALID_PARAMS);
4171
4172 if (window > interval)
4173 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4174 MGMT_STATUS_INVALID_PARAMS);
4175
4176 hci_dev_lock(hdev);
4177
4178 hdev->le_scan_interval = interval;
4179 hdev->le_scan_window = window;
4180
4181 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4182
4183 /* If background scan is running, restart it so new parameters are
4184 * loaded.
4185 */
4186 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4187 hdev->discovery.state == DISCOVERY_STOPPED) {
4188 struct hci_request req;
4189
4190 hci_req_init(&req, hdev);
4191
4192 hci_req_add_le_scan_disable(&req);
4193 hci_req_add_le_passive_scan(&req);
4194
4195 hci_req_run(&req, NULL);
4196 }
4197
4198 hci_dev_unlock(hdev);
4199
4200 return err;
4201 }
4202
4203 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4204 {
4205 struct pending_cmd *cmd;
4206
4207 BT_DBG("status 0x%02x", status);
4208
4209 hci_dev_lock(hdev);
4210
4211 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4212 if (!cmd)
4213 goto unlock;
4214
4215 if (status) {
4216 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4217 mgmt_status(status));
4218 } else {
4219 struct mgmt_mode *cp = cmd->param;
4220
4221 if (cp->val)
4222 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4223 else
4224 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4225
4226 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4227 new_settings(hdev, cmd->sk);
4228 }
4229
4230 mgmt_pending_remove(cmd);
4231
4232 unlock:
4233 hci_dev_unlock(hdev);
4234 }
4235
/* Set Fast Connectable management command handler.
 *
 * Requires BR/EDR enabled on a >= 1.2 controller that is powered and
 * connectable. Queues the page scan parameter change to the
 * controller; fast_connectable_complete() finishes the operation.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Value already matches: just resend the current settings */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4300
4301 static void set_bredr_scan(struct hci_request *req)
4302 {
4303 struct hci_dev *hdev = req->hdev;
4304 u8 scan = 0;
4305
4306 /* Ensure that fast connectable is disabled. This function will
4307 * not do anything if the page scan parameters are already what
4308 * they should be.
4309 */
4310 write_fast_connectable(req, false);
4311
4312 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4313 scan |= SCAN_PAGE;
4314 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4315 scan |= SCAN_INQUIRY;
4316
4317 if (scan)
4318 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4319 }
4320
4321 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4322 {
4323 struct pending_cmd *cmd;
4324
4325 BT_DBG("status 0x%02x", status);
4326
4327 hci_dev_lock(hdev);
4328
4329 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4330 if (!cmd)
4331 goto unlock;
4332
4333 if (status) {
4334 u8 mgmt_err = mgmt_status(status);
4335
4336 /* We need to restore the flag if related HCI commands
4337 * failed.
4338 */
4339 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4340
4341 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4342 } else {
4343 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4344 new_settings(hdev, cmd->sk);
4345 }
4346
4347 mgmt_pending_remove(cmd);
4348
4349 unlock:
4350 hci_dev_unlock(hdev);
4351 }
4352
/* Set BR/EDR management command handler.
 *
 * Enables or disables BR/EDR on a dual-mode controller. Disabling is
 * only permitted while powered off; enabling while powered queues the
 * required HCI commands and set_bredr_complete() finishes (and, on
 * failure, reverts) the operation.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just resend the current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4442
/* Set Secure Connections management command handler.
 *
 * Value 0x00 disables SC, 0x01 enables SC and 0x02 enables SC-only
 * mode. While powered off only the flags are toggled; while powered a
 * Write Secure Connections Host Support HCI command is sent.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC is a debugfs override for controllers without
	 * native SC support.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to change: both SC and SC-only already match */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only is a host-side flag; toggle it right away */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4530
/* Set Debug Keys management command handler.
 *
 * Value 0x00 discards debug keys, 0x01 keeps them, 0x02 additionally
 * puts the controller into SSP debug mode when powered and SSP is on.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 actively turns on debug key usage */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Push the SSP debug mode change to the controller when needed */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4578
4579 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4580 u16 len)
4581 {
4582 struct mgmt_cp_set_privacy *cp = cp_data;
4583 bool changed;
4584 int err;
4585
4586 BT_DBG("request for %s", hdev->name);
4587
4588 if (!lmp_le_capable(hdev))
4589 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4590 MGMT_STATUS_NOT_SUPPORTED);
4591
4592 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4593 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4594 MGMT_STATUS_INVALID_PARAMS);
4595
4596 if (hdev_is_powered(hdev))
4597 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4598 MGMT_STATUS_REJECTED);
4599
4600 hci_dev_lock(hdev);
4601
4602 /* If user space supports this command it is also expected to
4603 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4604 */
4605 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4606
4607 if (cp->privacy) {
4608 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4609 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4610 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4611 } else {
4612 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4613 memset(hdev->irk, 0, sizeof(hdev->irk));
4614 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4615 }
4616
4617 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4618 if (err < 0)
4619 goto unlock;
4620
4621 if (changed)
4622 err = new_settings(hdev, sk);
4623
4624 unlock:
4625 hci_dev_unlock(hdev);
4626 return err;
4627 }
4628
4629 static bool irk_is_valid(struct mgmt_irk_info *irk)
4630 {
4631 switch (irk->addr.type) {
4632 case BDADDR_LE_PUBLIC:
4633 return true;
4634
4635 case BDADDR_LE_RANDOM:
4636 /* Two most significant bits shall be set */
4637 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4638 return false;
4639 return true;
4640 }
4641
4642 return false;
4643 }
4644
/* Load IRKs management command handler.
 *
 * Replaces the complete IRK store: the whole list is validated first,
 * then the existing IRKs are cleared and the new ones added, so a bad
 * entry never leaves the store partially updated.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len within u16 range */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the stored IRKs */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it handles RPA resolving */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4711
4712 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4713 {
4714 if (key->master != 0x00 && key->master != 0x01)
4715 return false;
4716
4717 switch (key->addr.type) {
4718 case BDADDR_LE_PUBLIC:
4719 return true;
4720
4721 case BDADDR_LE_RANDOM:
4722 /* Two most significant bits shall be set */
4723 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4724 return false;
4725 return true;
4726 }
4727
4728 return false;
4729 }
4730
/* Load Long Term Keys management command handler.
 *
 * Replaces the complete LTK store: the whole list is validated first,
 * then existing LTKs are cleared and the new ones added. Entries with
 * an unknown key type are silently skipped rather than rejected.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len within u16 range */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Skip unknown key types without failing the load */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4814
/* Context passed to get_conn_info_complete() via mgmt_pending_foreach()
 * so only the pending command matching a given connection is answered.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the response is for */
	bool valid_tx_power;	/* TX power values in conn are usable */
	u8 mgmt_status;		/* status to report back to userspace */
};
4820
/* mgmt_pending_foreach() callback: reply to the Get Connection Info
 * command whose pending entry refers to match->conn, using the RSSI
 * and TX power values just refreshed into the hci_conn.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer the command for the connection being refreshed */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Release the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4856
/* Completion callback for the Read RSSI / Read TX Power request built
 * by get_conn_info(). Resolves the connection handle from the last
 * sent command and replies to the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4914
/* Handle the Get Connection Info command: report RSSI and TX power for
 * an existing connection. Cached values are returned if recent enough;
 * otherwise an HCI request is issued and the reply is deferred to
 * conn_info_refresh_complete() / get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The response always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always the first command in the request;
		 * conn_info_refresh_complete() relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the completion callback
		 * runs; it drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5026
/* Completion callback for the Read Clock request(s) issued by
 * get_clock_info(). The last sent Read Clock command tells whether a
 * piconet clock (which != 0, handle valid) or only the local clock was
 * requested, which in turn identifies the pending mgmt command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The connection (or NULL) was stored as the pending command's
	 * user_data and is therefore used as the lookup key here.
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure only the address is reported back */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5079
5080 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5081 u16 len)
5082 {
5083 struct mgmt_cp_get_clock_info *cp = data;
5084 struct mgmt_rp_get_clock_info rp;
5085 struct hci_cp_read_clock hci_cp;
5086 struct pending_cmd *cmd;
5087 struct hci_request req;
5088 struct hci_conn *conn;
5089 int err;
5090
5091 BT_DBG("%s", hdev->name);
5092
5093 memset(&rp, 0, sizeof(rp));
5094 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5095 rp.addr.type = cp->addr.type;
5096
5097 if (cp->addr.type != BDADDR_BREDR)
5098 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5099 MGMT_STATUS_INVALID_PARAMS,
5100 &rp, sizeof(rp));
5101
5102 hci_dev_lock(hdev);
5103
5104 if (!hdev_is_powered(hdev)) {
5105 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5106 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5107 goto unlock;
5108 }
5109
5110 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5112 &cp->addr.bdaddr);
5113 if (!conn || conn->state != BT_CONNECTED) {
5114 err = cmd_complete(sk, hdev->id,
5115 MGMT_OP_GET_CLOCK_INFO,
5116 MGMT_STATUS_NOT_CONNECTED,
5117 &rp, sizeof(rp));
5118 goto unlock;
5119 }
5120 } else {
5121 conn = NULL;
5122 }
5123
5124 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5125 if (!cmd) {
5126 err = -ENOMEM;
5127 goto unlock;
5128 }
5129
5130 hci_req_init(&req, hdev);
5131
5132 memset(&hci_cp, 0, sizeof(hci_cp));
5133 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5134
5135 if (conn) {
5136 hci_conn_hold(conn);
5137 cmd->user_data = conn;
5138
5139 hci_cp.handle = cpu_to_le16(conn->handle);
5140 hci_cp.which = 0x01; /* Piconet clock */
5141 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5142 }
5143
5144 err = hci_req_run(&req, get_clock_info_complete);
5145 if (err < 0)
5146 mgmt_pending_remove(cmd);
5147
5148 unlock:
5149 hci_dev_unlock(hdev);
5150 return err;
5151 }
5152
5153 static void device_added(struct sock *sk, struct hci_dev *hdev,
5154 bdaddr_t *bdaddr, u8 type, u8 action)
5155 {
5156 struct mgmt_ev_device_added ev;
5157
5158 bacpy(&ev.addr.bdaddr, bdaddr);
5159 ev.addr.type = type;
5160 ev.action = action;
5161
5162 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5163 }
5164
5165 static int add_device(struct sock *sk, struct hci_dev *hdev,
5166 void *data, u16 len)
5167 {
5168 struct mgmt_cp_add_device *cp = data;
5169 u8 auto_conn, addr_type;
5170 int err;
5171
5172 BT_DBG("%s", hdev->name);
5173
5174 if (!bdaddr_type_is_le(cp->addr.type) ||
5175 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5176 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5177 MGMT_STATUS_INVALID_PARAMS,
5178 &cp->addr, sizeof(cp->addr));
5179
5180 if (cp->action != 0x00 && cp->action != 0x01)
5181 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5182 MGMT_STATUS_INVALID_PARAMS,
5183 &cp->addr, sizeof(cp->addr));
5184
5185 hci_dev_lock(hdev);
5186
5187 if (cp->addr.type == BDADDR_LE_PUBLIC)
5188 addr_type = ADDR_LE_DEV_PUBLIC;
5189 else
5190 addr_type = ADDR_LE_DEV_RANDOM;
5191
5192 if (cp->action)
5193 auto_conn = HCI_AUTO_CONN_ALWAYS;
5194 else
5195 auto_conn = HCI_AUTO_CONN_REPORT;
5196
5197 /* If the connection parameters don't exist for this device,
5198 * they will be created and configured with defaults.
5199 */
5200 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5201 auto_conn) < 0) {
5202 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5203 MGMT_STATUS_FAILED,
5204 &cp->addr, sizeof(cp->addr));
5205 goto unlock;
5206 }
5207
5208 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5209
5210 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5211 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5212
5213 unlock:
5214 hci_dev_unlock(hdev);
5215 return err;
5216 }
5217
5218 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5219 bdaddr_t *bdaddr, u8 type)
5220 {
5221 struct mgmt_ev_device_removed ev;
5222
5223 bacpy(&ev.addr.bdaddr, bdaddr);
5224 ev.addr.type = type;
5225
5226 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5227 }
5228
/* Handle the Remove Device command: unregister a device previously
 * added with Add Device. A BDADDR_ANY address removes all devices that
 * were explicitly enabled (but keeps disabled parameter entries).
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so they
		 * can't be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from both the action and the parameter lists
		 * before freeing, then refresh the background scan.
		 */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_clear_enabled(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5295
/* Handle the Load Connection Parameters command: replace the stored
 * "disabled" connection parameter entries with the list supplied by
 * userspace. Individual invalid entries are logged and skipped rather
 * than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command is variable length; total length must match the
	 * advertised parameter count exactly.
	 */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5380
/* Handle the Set External Configuration command: record whether the
 * controller is configured by external means and, if that changes the
 * overall configured state, re-announce the index so it moves between
 * the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers that declare the external
	 * configuration quirk.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* If the unconfigured flag no longer matches the actual
	 * configuration state, toggle it and re-announce the index.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);
		change_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
		mgmt_index_added(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5428
/* Command dispatch table indexed by mgmt opcode. For fixed size
 * commands data_len is the exact expected parameter length; for
 * variable length commands (var_len) it is the minimum length.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
};
5493
/* Entry point for all mgmt commands received on a control socket.
 *
 * Validates the header, resolves the target controller index, applies
 * the generic index/size checks and dispatches to the per-opcode
 * handler from mgmt_handlers[]. Returns the number of bytes consumed
 * on success or a negative error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or bound to a user channel
		 * are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index list) commands must not carry a controller index */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* All other commands require a valid controller index */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* On success the whole message is reported as consumed */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5601
5602 void mgmt_index_added(struct hci_dev *hdev)
5603 {
5604 if (hdev->dev_type != HCI_BREDR)
5605 return;
5606
5607 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5608 return;
5609
5610 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5611 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5612 else
5613 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5614 }
5615
5616 void mgmt_index_removed(struct hci_dev *hdev)
5617 {
5618 u8 status = MGMT_STATUS_INVALID_INDEX;
5619
5620 if (hdev->dev_type != HCI_BREDR)
5621 return;
5622
5623 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5624 return;
5625
5626 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5627
5628 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5629 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5630 else
5631 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5632 }
5633
5634 /* This function requires the caller holds hdev->lock */
5635 static void restart_le_actions(struct hci_dev *hdev)
5636 {
5637 struct hci_conn_params *p;
5638
5639 list_for_each_entry(p, &hdev->le_conn_params, list) {
5640 /* Needed for AUTO_OFF case where might not "really"
5641 * have been powered off.
5642 */
5643 list_del_init(&p->action);
5644
5645 switch (p->auto_connect) {
5646 case HCI_AUTO_CONN_ALWAYS:
5647 list_add(&p->action, &hdev->pend_le_conns);
5648 break;
5649 case HCI_AUTO_CONN_REPORT:
5650 list_add(&p->action, &hdev->pend_le_reports);
5651 break;
5652 default:
5653 break;
5654 }
5655 }
5656
5657 hci_update_background_scan(hdev);
5658 }
5659
5660 static void powered_complete(struct hci_dev *hdev, u8 status)
5661 {
5662 struct cmd_lookup match = { NULL, hdev };
5663
5664 BT_DBG("status 0x%02x", status);
5665
5666 hci_dev_lock(hdev);
5667
5668 restart_le_actions(hdev);
5669
5670 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5671
5672 new_settings(hdev, match.sk);
5673
5674 hci_dev_unlock(hdev);
5675
5676 if (match.sk)
5677 sock_put(match.sk);
5678 }
5679
/* Build and run the HCI request that brings a freshly powered-on
 * controller in sync with the current mgmt settings. Returns the
 * hci_req_run() result: 0 means commands were queued and
 * powered_complete() will be invoked, non-zero means nothing was sent.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the link security (authentication) setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5739
/* Handle a controller power state change. When powering on, first try
 * to sync settings to the controller; responses and the New Settings
 * event are then sent from powered_complete(). When powering off (or
 * when nothing needed syncing), respond to pending commands and emit
 * the event here. Returns the new_settings() result, or 0 when mgmt is
 * not in use for this controller.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means HCI commands were queued; powered_complete()
		 * takes over from there.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail all other pending commands since the adapter is down */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Powering off resets the class of device; report it if it was
	 * previously non-zero.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5774
5775 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5776 {
5777 struct pending_cmd *cmd;
5778 u8 status;
5779
5780 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5781 if (!cmd)
5782 return;
5783
5784 if (err == -ERFKILL)
5785 status = MGMT_STATUS_RFKILLED;
5786 else
5787 status = MGMT_STATUS_FAILED;
5788
5789 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5790
5791 mgmt_pending_remove(cmd);
5792 }
5793
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore plain page scan on BR/EDR, refresh class and
 * advertising data, and announce the settings change.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5824
5825 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5826 {
5827 bool changed;
5828
5829 /* Nothing needed here if there's a pending command since that
5830 * commands request completion callback takes care of everything
5831 * necessary.
5832 */
5833 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5834 return;
5835
5836 /* Powering off may clear the scan mode - don't let that interfere */
5837 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5838 return;
5839
5840 if (discoverable) {
5841 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5842 } else {
5843 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5844 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5845 }
5846
5847 if (changed) {
5848 struct hci_request req;
5849
5850 /* In case this change in discoverable was triggered by
5851 * a disabling of connectable there could be a need to
5852 * update the advertising flags.
5853 */
5854 hci_req_init(&req, hdev);
5855 update_adv_data(&req);
5856 hci_req_run(&req, NULL);
5857
5858 new_settings(hdev, NULL);
5859 }
5860 }
5861
5862 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5863 {
5864 bool changed;
5865
5866 /* Nothing needed here if there's a pending command since that
5867 * commands request completion callback takes care of everything
5868 * necessary.
5869 */
5870 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5871 return;
5872
5873 /* Powering off may clear the scan mode - don't let that interfere */
5874 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5875 return;
5876
5877 if (connectable)
5878 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5879 else
5880 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5881
5882 if (changed)
5883 new_settings(hdev, NULL);
5884 }
5885
5886 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5887 {
5888 /* Powering off may stop advertising - don't let that interfere */
5889 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5890 return;
5891
5892 if (advertising)
5893 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5894 else
5895 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5896 }
5897
5898 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5899 {
5900 u8 mgmt_err = mgmt_status(status);
5901
5902 if (scan & SCAN_PAGE)
5903 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5904 cmd_status_rsp, &mgmt_err);
5905
5906 if (scan & SCAN_INQUIRY)
5907 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5908 cmd_status_rsp, &mgmt_err);
5909 }
5910
5911 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5912 bool persistent)
5913 {
5914 struct mgmt_ev_new_link_key ev;
5915
5916 memset(&ev, 0, sizeof(ev));
5917
5918 ev.store_hint = persistent;
5919 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5920 ev.key.addr.type = BDADDR_BREDR;
5921 ev.key.type = key->type;
5922 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5923 ev.key.pin_len = key->pin_len;
5924
5925 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5926 }
5927
5928 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5929 {
5930 if (ltk->authenticated)
5931 return MGMT_LTK_AUTHENTICATED;
5932
5933 return MGMT_LTK_UNAUTHENTICATED;
5934 }
5935
/* Send a New Long Term Key event to user space for a freshly
 * distributed LE LTK; persistent selects whether user space should
 * store it (subject to the address-type check below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Flag master-type (SMP_LTK) keys in the event */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5973
/* Send a New IRK event to user space for a freshly distributed LE
 * identity resolving key.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6003
6004 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6005 bool persistent)
6006 {
6007 struct mgmt_ev_new_csrk ev;
6008
6009 memset(&ev, 0, sizeof(ev));
6010
6011 /* Devices using resolvable or non-resolvable random addresses
6012 * without providing an indentity resolving key don't require
6013 * to store signature resolving keys. Their addresses will change
6014 * the next time around.
6015 *
6016 * Only when a remote device provides an identity address
6017 * make sure the signature resolving key is stored. So allow
6018 * static random and public addresses here.
6019 */
6020 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6021 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6022 ev.store_hint = 0x00;
6023 else
6024 ev.store_hint = persistent;
6025
6026 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6027 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6028 ev.key.master = csrk->master;
6029 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6030
6031 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6032 }
6033
6034 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6035 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6036 u16 max_interval, u16 latency, u16 timeout)
6037 {
6038 struct mgmt_ev_new_conn_param ev;
6039
6040 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6041 return;
6042
6043 memset(&ev, 0, sizeof(ev));
6044 bacpy(&ev.addr.bdaddr, bdaddr);
6045 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6046 ev.store_hint = store_hint;
6047 ev.min_interval = cpu_to_le16(min_interval);
6048 ev.max_interval = cpu_to_le16(max_interval);
6049 ev.latency = cpu_to_le16(latency);
6050 ev.timeout = cpu_to_le16(timeout);
6051
6052 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6053 }
6054
6055 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6056 u8 data_len)
6057 {
6058 eir[eir_len++] = sizeof(type) + data_len;
6059 eir[eir_len++] = type;
6060 memcpy(&eir[eir_len], data, data_len);
6061 eir_len += data_len;
6062
6063 return eir_len;
6064 }
6065
6066 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6067 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6068 u8 *dev_class)
6069 {
6070 char buf[512];
6071 struct mgmt_ev_device_connected *ev = (void *) buf;
6072 u16 eir_len = 0;
6073
6074 bacpy(&ev->addr.bdaddr, bdaddr);
6075 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6076
6077 ev->flags = __cpu_to_le32(flags);
6078
6079 if (name_len > 0)
6080 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6081 name, name_len);
6082
6083 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6084 eir_len = eir_append_data(ev->eir, eir_len,
6085 EIR_CLASS_OF_DEV, dev_class, 3);
6086
6087 ev->eir_len = cpu_to_le16(eir_len);
6088
6089 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6090 sizeof(*ev) + eir_len, NULL);
6091 }
6092
6093 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6094 {
6095 struct mgmt_cp_disconnect *cp = cmd->param;
6096 struct sock **sk = data;
6097 struct mgmt_rp_disconnect rp;
6098
6099 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6100 rp.addr.type = cp->addr.type;
6101
6102 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6103 sizeof(rp));
6104
6105 *sk = cmd->sk;
6106 sock_hold(*sk);
6107
6108 mgmt_pending_remove(cmd);
6109 }
6110
6111 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6112 {
6113 struct hci_dev *hdev = data;
6114 struct mgmt_cp_unpair_device *cp = cmd->param;
6115 struct mgmt_rp_unpair_device rp;
6116
6117 memset(&rp, 0, sizeof(rp));
6118 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6119 rp.addr.type = cp->addr.type;
6120
6121 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6122
6123 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6124
6125 mgmt_pending_remove(cmd);
6126 }
6127
/* Handle an HCI disconnection: let a pending power-off proceed when
 * this was the last connection, complete any pending Disconnect
 * commands and emit a Device Disconnected event for ACL/LE links.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only connections that user space was told about get an event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores a held reference to the responding
	 * socket in sk; that socket is passed to mgmt_event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6169
6170 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6171 u8 link_type, u8 addr_type, u8 status)
6172 {
6173 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6174 struct mgmt_cp_disconnect *cp;
6175 struct mgmt_rp_disconnect rp;
6176 struct pending_cmd *cmd;
6177
6178 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6179 hdev);
6180
6181 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6182 if (!cmd)
6183 return;
6184
6185 cp = cmd->param;
6186
6187 if (bacmp(bdaddr, &cp->addr.bdaddr))
6188 return;
6189
6190 if (cp->addr.type != bdaddr_type)
6191 return;
6192
6193 bacpy(&rp.addr.bdaddr, bdaddr);
6194 rp.addr.type = bdaddr_type;
6195
6196 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6197 mgmt_status(status), &rp, sizeof(rp));
6198
6199 mgmt_pending_remove(cmd);
6200 }
6201
6202 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6203 u8 addr_type, u8 status)
6204 {
6205 struct mgmt_ev_connect_failed ev;
6206 struct pending_cmd *power_off;
6207
6208 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6209 if (power_off) {
6210 struct mgmt_mode *cp = power_off->param;
6211
6212 /* The connection is still in hci_conn_hash so test for 1
6213 * instead of 0 to know if this is the last one.
6214 */
6215 if (!cp->val && hci_conn_count(hdev) == 1) {
6216 cancel_delayed_work(&hdev->power_off);
6217 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6218 }
6219 }
6220
6221 bacpy(&ev.addr.bdaddr, bdaddr);
6222 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6223 ev.status = mgmt_status(status);
6224
6225 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6226 }
6227
6228 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6229 {
6230 struct mgmt_ev_pin_code_request ev;
6231
6232 bacpy(&ev.addr.bdaddr, bdaddr);
6233 ev.addr.type = BDADDR_BREDR;
6234 ev.secure = secure;
6235
6236 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6237 }
6238
6239 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6240 u8 status)
6241 {
6242 struct pending_cmd *cmd;
6243 struct mgmt_rp_pin_code_reply rp;
6244
6245 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6246 if (!cmd)
6247 return;
6248
6249 bacpy(&rp.addr.bdaddr, bdaddr);
6250 rp.addr.type = BDADDR_BREDR;
6251
6252 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6253 mgmt_status(status), &rp, sizeof(rp));
6254
6255 mgmt_pending_remove(cmd);
6256 }
6257
6258 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6259 u8 status)
6260 {
6261 struct pending_cmd *cmd;
6262 struct mgmt_rp_pin_code_reply rp;
6263
6264 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6265 if (!cmd)
6266 return;
6267
6268 bacpy(&rp.addr.bdaddr, bdaddr);
6269 rp.addr.type = BDADDR_BREDR;
6270
6271 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6272 mgmt_status(status), &rp, sizeof(rp));
6273
6274 mgmt_pending_remove(cmd);
6275 }
6276
6277 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6278 u8 link_type, u8 addr_type, u32 value,
6279 u8 confirm_hint)
6280 {
6281 struct mgmt_ev_user_confirm_request ev;
6282
6283 BT_DBG("%s", hdev->name);
6284
6285 bacpy(&ev.addr.bdaddr, bdaddr);
6286 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6287 ev.confirm_hint = confirm_hint;
6288 ev.value = cpu_to_le32(value);
6289
6290 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6291 NULL);
6292 }
6293
6294 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6295 u8 link_type, u8 addr_type)
6296 {
6297 struct mgmt_ev_user_passkey_request ev;
6298
6299 BT_DBG("%s", hdev->name);
6300
6301 bacpy(&ev.addr.bdaddr, bdaddr);
6302 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6303
6304 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6305 NULL);
6306 }
6307
6308 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6309 u8 link_type, u8 addr_type, u8 status,
6310 u8 opcode)
6311 {
6312 struct pending_cmd *cmd;
6313 struct mgmt_rp_user_confirm_reply rp;
6314 int err;
6315
6316 cmd = mgmt_pending_find(opcode, hdev);
6317 if (!cmd)
6318 return -ENOENT;
6319
6320 bacpy(&rp.addr.bdaddr, bdaddr);
6321 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6322 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6323 &rp, sizeof(rp));
6324
6325 mgmt_pending_remove(cmd);
6326
6327 return err;
6328 }
6329
6330 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6331 u8 link_type, u8 addr_type, u8 status)
6332 {
6333 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6334 status, MGMT_OP_USER_CONFIRM_REPLY);
6335 }
6336
6337 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6338 u8 link_type, u8 addr_type, u8 status)
6339 {
6340 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6341 status,
6342 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6343 }
6344
6345 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6346 u8 link_type, u8 addr_type, u8 status)
6347 {
6348 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6349 status, MGMT_OP_USER_PASSKEY_REPLY);
6350 }
6351
6352 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6353 u8 link_type, u8 addr_type, u8 status)
6354 {
6355 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6356 status,
6357 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6358 }
6359
6360 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6361 u8 link_type, u8 addr_type, u32 passkey,
6362 u8 entered)
6363 {
6364 struct mgmt_ev_passkey_notify ev;
6365
6366 BT_DBG("%s", hdev->name);
6367
6368 bacpy(&ev.addr.bdaddr, bdaddr);
6369 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6370 ev.passkey = __cpu_to_le32(passkey);
6371 ev.entered = entered;
6372
6373 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6374 }
6375
6376 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6377 u8 addr_type, u8 status)
6378 {
6379 struct mgmt_ev_auth_failed ev;
6380
6381 bacpy(&ev.addr.bdaddr, bdaddr);
6382 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6383 ev.status = mgmt_status(status);
6384
6385 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6386 }
6387
6388 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6389 {
6390 struct cmd_lookup match = { NULL, hdev };
6391 bool changed;
6392
6393 if (status) {
6394 u8 mgmt_err = mgmt_status(status);
6395 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6396 cmd_status_rsp, &mgmt_err);
6397 return;
6398 }
6399
6400 if (test_bit(HCI_AUTH, &hdev->flags))
6401 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6402 &hdev->dev_flags);
6403 else
6404 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6405 &hdev->dev_flags);
6406
6407 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6408 &match);
6409
6410 if (changed)
6411 new_settings(hdev, match.sk);
6412
6413 if (match.sk)
6414 sock_put(match.sk);
6415 }
6416
6417 static void clear_eir(struct hci_request *req)
6418 {
6419 struct hci_dev *hdev = req->hdev;
6420 struct hci_cp_write_eir cp;
6421
6422 if (!lmp_ext_inq_capable(hdev))
6423 return;
6424
6425 memset(hdev->eir, 0, sizeof(hdev->eir));
6426
6427 memset(&cp, 0, sizeof(cp));
6428
6429 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6430 }
6431
/* A Write Simple Pairing Mode command finished: sync the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, respond to
 * pending Set SSP commands and refresh the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags (HS depends on
		 * SSP) and let user space know about the rollback.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables HS; report a change if
		 * either flag was previously set.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* With SSP on, (re)build the EIR data - enabling debug keys if
	 * requested; with SSP off, clear it.
	 */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6484
6485 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6486 {
6487 struct cmd_lookup match = { NULL, hdev };
6488 bool changed = false;
6489
6490 if (status) {
6491 u8 mgmt_err = mgmt_status(status);
6492
6493 if (enable) {
6494 if (test_and_clear_bit(HCI_SC_ENABLED,
6495 &hdev->dev_flags))
6496 new_settings(hdev, NULL);
6497 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6498 }
6499
6500 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6501 cmd_status_rsp, &mgmt_err);
6502 return;
6503 }
6504
6505 if (enable) {
6506 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6507 } else {
6508 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6509 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6510 }
6511
6512 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6513 settings_rsp, &match);
6514
6515 if (changed)
6516 new_settings(hdev, match.sk);
6517
6518 if (match.sk)
6519 sock_put(match.sk);
6520 }
6521
6522 static void sk_lookup(struct pending_cmd *cmd, void *data)
6523 {
6524 struct cmd_lookup *match = data;
6525
6526 if (match->sk == NULL) {
6527 match->sk = cmd->sk;
6528 sock_hold(match->sk);
6529 }
6530 }
6531
6532 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6533 u8 status)
6534 {
6535 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6536
6537 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6538 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6539 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6540
6541 if (!status)
6542 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6543 NULL);
6544
6545 if (match.sk)
6546 sock_put(match.sk);
6547 }
6548
/* A local name update finished: emit a Local Name Changed event unless
 * the update came from the power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command, so this was an
		 * internally triggered update - cache the new name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket (if any) when broadcasting */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6575
6576 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6577 u8 *randomizer192, u8 *hash256,
6578 u8 *randomizer256, u8 status)
6579 {
6580 struct pending_cmd *cmd;
6581
6582 BT_DBG("%s status %u", hdev->name, status);
6583
6584 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6585 if (!cmd)
6586 return;
6587
6588 if (status) {
6589 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6590 mgmt_status(status));
6591 } else {
6592 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6593 hash256 && randomizer256) {
6594 struct mgmt_rp_read_local_oob_ext_data rp;
6595
6596 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6597 memcpy(rp.randomizer192, randomizer192,
6598 sizeof(rp.randomizer192));
6599
6600 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6601 memcpy(rp.randomizer256, randomizer256,
6602 sizeof(rp.randomizer256));
6603
6604 cmd_complete(cmd->sk, hdev->id,
6605 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6606 &rp, sizeof(rp));
6607 } else {
6608 struct mgmt_rp_read_local_oob_data rp;
6609
6610 memcpy(rp.hash, hash192, sizeof(rp.hash));
6611 memcpy(rp.randomizer, randomizer192,
6612 sizeof(rp.randomizer));
6613
6614 cmd_complete(cmd->sk, hdev->id,
6615 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6616 &rp, sizeof(rp));
6617 }
6618 }
6619
6620 mgmt_pending_remove(cmd);
6621 }
6622
/* Send a Device Found event for an inquiry/scan result, combining EIR
 * and scan response data and substituting the identity address when an
 * IRK matches.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* Report the identity address when the RPA can be resolved */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Add the class of device unless the EIR already carries one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data is appended directly after the EIR data */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6678
6679 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6680 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6681 {
6682 struct mgmt_ev_device_found *ev;
6683 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6684 u16 eir_len;
6685
6686 ev = (struct mgmt_ev_device_found *) buf;
6687
6688 memset(buf, 0, sizeof(buf));
6689
6690 bacpy(&ev->addr.bdaddr, bdaddr);
6691 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6692 ev->rssi = rssi;
6693
6694 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6695 name_len);
6696
6697 ev->eir_len = cpu_to_le16(eir_len);
6698
6699 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6700 }
6701
6702 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6703 {
6704 struct mgmt_ev_discovering ev;
6705 struct pending_cmd *cmd;
6706
6707 BT_DBG("%s discovering %u", hdev->name, discovering);
6708
6709 if (discovering)
6710 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6711 else
6712 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6713
6714 if (cmd != NULL) {
6715 u8 type = hdev->discovery.type;
6716
6717 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6718 sizeof(type));
6719 mgmt_pending_remove(cmd);
6720 }
6721
6722 memset(&ev, 0, sizeof(ev));
6723 ev.type = hdev->discovery.type;
6724 ev.discovering = discovering;
6725
6726 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6727 }
6728
6729 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6730 {
6731 BT_DBG("%s status %u", hdev->name, status);
6732
6733 /* Clear the advertising mgmt setting if we failed to re-enable it */
6734 if (status) {
6735 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6736 new_settings(hdev, NULL);
6737 }
6738 }
6739
6740 void mgmt_reenable_advertising(struct hci_dev *hdev)
6741 {
6742 struct hci_request req;
6743
6744 if (hci_conn_num(hdev, LE_LINK) > 0)
6745 return;
6746
6747 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6748 return;
6749
6750 hci_req_init(&req, hdev);
6751 enable_advertising(&req);
6752
6753 /* If this fails we have no option but to let user space know
6754 * that we've disabled advertising.
6755 */
6756 if (hci_req_run(&req, adv_enable_complete) < 0) {
6757 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6758 new_settings(hdev, NULL);
6759 }
6760 }