/* Source: mirror_ubuntu-bionic-kernel.git — net/bluetooth/mgmt.c
 * (tree state as of "Bluetooth: Don't try background scanning if LE
 * is not enabled")
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
/* Protocol version reported via MGMT_OP_READ_VERSION */
#define MGMT_VERSION	1
#define MGMT_REVISION	7

40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_PAIRABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_BREDR,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
85 MGMT_OP_SET_PRIVACY,
86 MGMT_OP_LOAD_IRKS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
89 MGMT_OP_ADD_DEVICE,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
96 };
97
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
100 MGMT_EV_INDEX_ADDED,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
113 MGMT_EV_AUTH_FAILED,
114 MGMT_EV_DEVICE_FOUND,
115 MGMT_EV_DISCOVERING,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
120 MGMT_EV_NEW_IRK,
121 MGMT_EV_NEW_CSRK,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
128 };
129
/* How long the discovery result cache stays valid */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* "Powered" from mgmt's point of view: the controller is up and not
 * inside the auto-power-off grace period that follows a raw power on.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
			       !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

135 struct pending_cmd {
136 struct list_head list;
137 u16 opcode;
138 int index;
139 void *param;
140 struct sock *sk;
141 void *user_data;
142 };
143
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table[] = {
146 MGMT_STATUS_SUCCESS,
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
207 };
208
209 static u8 mgmt_status(u8 hci_status)
210 {
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
213
214 return MGMT_STATUS_FAILED;
215 }
216
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
219 {
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
222
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
226
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
234
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
237
238 /* Time stamp */
239 __net_timestamp(skb);
240
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
243
244 return 0;
245 }
246
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
248 {
249 struct sk_buff *skb;
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
252 int err;
253
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
255
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 if (!skb)
258 return -ENOMEM;
259
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
261
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
265
266 ev = (void *) skb_put(skb, sizeof(*ev));
267 ev->status = status;
268 ev->opcode = cpu_to_le16(cmd);
269
270 err = sock_queue_rcv_skb(sk, skb);
271 if (err < 0)
272 kfree_skb(skb);
273
274 return err;
275 }
276
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
279 {
280 struct sk_buff *skb;
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
283 int err;
284
285 BT_DBG("sock %p", sk);
286
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
288 if (!skb)
289 return -ENOMEM;
290
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
292
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
296
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
299 ev->status = status;
300
301 if (rp)
302 memcpy(ev->data, rp, rp_len);
303
304 err = sock_queue_rcv_skb(sk, skb);
305 if (err < 0)
306 kfree_skb(skb);
307
308 return err;
309 }
310
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_version rp;
315
316 BT_DBG("sock %p", sk);
317
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
320
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
323 }
324
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 u16 data_len)
327 {
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
331 __le16 *opcode;
332 size_t rp_size;
333 int i, err;
334
335 BT_DBG("sock %p", sk);
336
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338
339 rp = kmalloc(rp_size, GFP_KERNEL);
340 if (!rp)
341 return -ENOMEM;
342
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
345
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
348
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
351
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 rp_size);
354 kfree(rp);
355
356 return err;
357 }
358
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
360 u16 data_len)
361 {
362 struct mgmt_rp_read_index_list *rp;
363 struct hci_dev *d;
364 size_t rp_len;
365 u16 count;
366 int err;
367
368 BT_DBG("sock %p", sk);
369
370 read_lock(&hci_dev_list_lock);
371
372 count = 0;
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
376 count++;
377 }
378
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
381 if (!rp) {
382 read_unlock(&hci_dev_list_lock);
383 return -ENOMEM;
384 }
385
386 count = 0;
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
391 continue;
392
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
395 */
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
397 continue;
398
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
403 }
404 }
405
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
408
409 read_unlock(&hci_dev_list_lock);
410
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
412 rp_len);
413
414 kfree(rp);
415
416 return err;
417 }
418
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
421 {
422 struct mgmt_rp_read_unconf_index_list *rp;
423 struct hci_dev *d;
424 size_t rp_len;
425 u16 count;
426 int err;
427
428 BT_DBG("sock %p", sk);
429
430 read_lock(&hci_dev_list_lock);
431
432 count = 0;
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
436 count++;
437 }
438
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
441 if (!rp) {
442 read_unlock(&hci_dev_list_lock);
443 return -ENOMEM;
444 }
445
446 count = 0;
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
451 continue;
452
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
455 */
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
457 continue;
458
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
463 }
464 }
465
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
468
469 read_unlock(&hci_dev_list_lock);
470
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
472 0, rp, rp_len);
473
474 kfree(rp);
475
476 return err;
477 }
478
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490 }
491
492 static __le32 get_missing_options(struct hci_dev *hdev)
493 {
494 u32 options = 0;
495
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504 return cpu_to_le32(options);
505 }
506
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513 }
514
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521 }
522
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
525 {
526 struct mgmt_rp_read_config_info rp;
527 u32 options = 0;
528
529 BT_DBG("sock %p %s", sk, hdev->name);
530
531 hci_dev_lock(hdev);
532
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
535
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
538
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
541
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
544
545 hci_dev_unlock(hdev);
546
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
548 sizeof(rp));
549 }
550
551 static u32 get_supported_settings(struct hci_dev *hdev)
552 {
553 u32 settings = 0;
554
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_PAIRABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558
559 if (lmp_bredr_capable(hdev)) {
560 settings |= MGMT_SETTING_CONNECTABLE;
561 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
562 settings |= MGMT_SETTING_FAST_CONNECTABLE;
563 settings |= MGMT_SETTING_DISCOVERABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
566
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
570 }
571
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
575 }
576
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
581 }
582
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
584 hdev->set_bdaddr)
585 settings |= MGMT_SETTING_CONFIGURATION;
586
587 return settings;
588 }
589
590 static u32 get_current_settings(struct hci_dev *hdev)
591 {
592 u32 settings = 0;
593
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
596
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
599
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
602
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
605
606 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_PAIRABLE;
608
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
611
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
614
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
617
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
620
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
623
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
626
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
629
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
632
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
635
636 return settings;
637 }
638
639 #define PNP_INFO_SVCLASS_ID 0x1200
640
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
642 {
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
645
646 if (len < 4)
647 return ptr;
648
649 list_for_each_entry(uuid, &hdev->uuids, list) {
650 u16 uuid16;
651
652 if (uuid->size != 16)
653 continue;
654
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
656 if (uuid16 < 0x1100)
657 continue;
658
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
660 continue;
661
662 if (!uuids_start) {
663 uuids_start = ptr;
664 uuids_start[0] = 1;
665 uuids_start[1] = EIR_UUID16_ALL;
666 ptr += 2;
667 }
668
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
672 break;
673 }
674
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
678 }
679
680 return ptr;
681 }
682
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
684 {
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
687
688 if (len < 6)
689 return ptr;
690
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
693 continue;
694
695 if (!uuids_start) {
696 uuids_start = ptr;
697 uuids_start[0] = 1;
698 uuids_start[1] = EIR_UUID32_ALL;
699 ptr += 2;
700 }
701
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
705 break;
706 }
707
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
709 ptr += sizeof(u32);
710 uuids_start[0] += sizeof(u32);
711 }
712
713 return ptr;
714 }
715
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
717 {
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
720
721 if (len < 18)
722 return ptr;
723
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
726 continue;
727
728 if (!uuids_start) {
729 uuids_start = ptr;
730 uuids_start[0] = 1;
731 uuids_start[1] = EIR_UUID128_ALL;
732 ptr += 2;
733 }
734
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
738 break;
739 }
740
741 memcpy(ptr, uuid->uuid, 16);
742 ptr += 16;
743 uuids_start[0] += 16;
744 }
745
746 return ptr;
747 }
748
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750 {
751 struct pending_cmd *cmd;
752
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
756 }
757
758 return NULL;
759 }
760
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764 {
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775 }
776
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
778 {
779 u8 ad_len = 0;
780 size_t name_len;
781
782 name_len = strlen(hdev->dev_name);
783 if (name_len > 0) {
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
785
786 if (name_len > max_len) {
787 name_len = max_len;
788 ptr[1] = EIR_NAME_SHORT;
789 } else
790 ptr[1] = EIR_NAME_COMPLETE;
791
792 ptr[0] = name_len + 1;
793
794 memcpy(ptr + 2, hdev->dev_name, name_len);
795
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
798 }
799
800 return ad_len;
801 }
802
803 static void update_scan_rsp_data(struct hci_request *req)
804 {
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
807 u8 len;
808
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
810 return;
811
812 memset(&cp, 0, sizeof(cp));
813
814 len = create_scan_rsp_data(hdev, cp.data);
815
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
818 return;
819
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
822
823 cp.length = len;
824
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
826 }
827
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
829 {
830 struct pending_cmd *cmd;
831
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
834 */
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
836 if (cmd) {
837 struct mgmt_mode *cp = cmd->param;
838 if (cp->val == 0x01)
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
842 } else {
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
847 }
848
849 return 0;
850 }
851
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
853 {
854 u8 ad_len = 0, flags = 0;
855
856 flags |= get_adv_discov_flags(hdev);
857
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
860
861 if (flags) {
862 BT_DBG("adv flags 0x%02x", flags);
863
864 ptr[0] = 2;
865 ptr[1] = EIR_FLAGS;
866 ptr[2] = flags;
867
868 ad_len += 3;
869 ptr += 3;
870 }
871
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
873 ptr[0] = 2;
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
876
877 ad_len += 3;
878 ptr += 3;
879 }
880
881 return ad_len;
882 }
883
884 static void update_adv_data(struct hci_request *req)
885 {
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
888 u8 len;
889
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
891 return;
892
893 memset(&cp, 0, sizeof(cp));
894
895 len = create_adv_data(hdev, cp.data);
896
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
899 return;
900
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
903
904 cp.length = len;
905
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
907 }
908
909 static void create_eir(struct hci_dev *hdev, u8 *data)
910 {
911 u8 *ptr = data;
912 size_t name_len;
913
914 name_len = strlen(hdev->dev_name);
915
916 if (name_len > 0) {
917 /* EIR Data type */
918 if (name_len > 48) {
919 name_len = 48;
920 ptr[1] = EIR_NAME_SHORT;
921 } else
922 ptr[1] = EIR_NAME_COMPLETE;
923
924 /* EIR Data length */
925 ptr[0] = name_len + 1;
926
927 memcpy(ptr + 2, hdev->dev_name, name_len);
928
929 ptr += (name_len + 2);
930 }
931
932 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
933 ptr[0] = 2;
934 ptr[1] = EIR_TX_POWER;
935 ptr[2] = (u8) hdev->inq_tx_power;
936
937 ptr += 3;
938 }
939
940 if (hdev->devid_source > 0) {
941 ptr[0] = 9;
942 ptr[1] = EIR_DEVICE_ID;
943
944 put_unaligned_le16(hdev->devid_source, ptr + 2);
945 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
946 put_unaligned_le16(hdev->devid_product, ptr + 6);
947 put_unaligned_le16(hdev->devid_version, ptr + 8);
948
949 ptr += 10;
950 }
951
952 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
953 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
954 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
955 }
956
957 static void update_eir(struct hci_request *req)
958 {
959 struct hci_dev *hdev = req->hdev;
960 struct hci_cp_write_eir cp;
961
962 if (!hdev_is_powered(hdev))
963 return;
964
965 if (!lmp_ext_inq_capable(hdev))
966 return;
967
968 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
969 return;
970
971 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
972 return;
973
974 memset(&cp, 0, sizeof(cp));
975
976 create_eir(hdev, cp.data);
977
978 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
979 return;
980
981 memcpy(hdev->eir, cp.data, sizeof(cp.data));
982
983 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
984 }
985
986 static u8 get_service_classes(struct hci_dev *hdev)
987 {
988 struct bt_uuid *uuid;
989 u8 val = 0;
990
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
993
994 return val;
995 }
996
997 static void update_class(struct hci_request *req)
998 {
999 struct hci_dev *hdev = req->hdev;
1000 u8 cod[3];
1001
1002 BT_DBG("%s", hdev->name);
1003
1004 if (!hdev_is_powered(hdev))
1005 return;
1006
1007 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1008 return;
1009
1010 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1011 return;
1012
1013 cod[0] = hdev->minor_class;
1014 cod[1] = hdev->major_class;
1015 cod[2] = get_service_classes(hdev);
1016
1017 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1018 cod[1] |= 0x20;
1019
1020 if (memcmp(cod, hdev->dev_class, 3) == 0)
1021 return;
1022
1023 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1024 }
1025
1026 static bool get_connectable(struct hci_dev *hdev)
1027 {
1028 struct pending_cmd *cmd;
1029
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1032 */
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1034 if (cmd) {
1035 struct mgmt_mode *cp = cmd->param;
1036 return cp->val;
1037 }
1038
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1040 }
1041
1042 static void enable_advertising(struct hci_request *req)
1043 {
1044 struct hci_dev *hdev = req->hdev;
1045 struct hci_cp_le_set_adv_param cp;
1046 u8 own_addr_type, enable = 0x01;
1047 bool connectable;
1048
1049 /* Clear the HCI_ADVERTISING bit temporarily so that the
1050 * hci_update_random_address knows that it's safe to go ahead
1051 * and write a new random address. The flag will be set back on
1052 * as soon as the SET_ADV_ENABLE HCI command completes.
1053 */
1054 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1055
1056 connectable = get_connectable(hdev);
1057
1058 /* Set require_privacy to true only when non-connectable
1059 * advertising is used. In that case it is fine to use a
1060 * non-resolvable private address.
1061 */
1062 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1063 return;
1064
1065 memset(&cp, 0, sizeof(cp));
1066 cp.min_interval = cpu_to_le16(0x0800);
1067 cp.max_interval = cpu_to_le16(0x0800);
1068 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1069 cp.own_address_type = own_addr_type;
1070 cp.channel_map = hdev->le_adv_channel_map;
1071
1072 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1073
1074 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1075 }
1076
1077 static void disable_advertising(struct hci_request *req)
1078 {
1079 u8 enable = 0x00;
1080
1081 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1082 }
1083
1084 static void service_cache_off(struct work_struct *work)
1085 {
1086 struct hci_dev *hdev = container_of(work, struct hci_dev,
1087 service_cache.work);
1088 struct hci_request req;
1089
1090 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1091 return;
1092
1093 hci_req_init(&req, hdev);
1094
1095 hci_dev_lock(hdev);
1096
1097 update_eir(&req);
1098 update_class(&req);
1099
1100 hci_dev_unlock(hdev);
1101
1102 hci_req_run(&req, NULL);
1103 }
1104
1105 static void rpa_expired(struct work_struct *work)
1106 {
1107 struct hci_dev *hdev = container_of(work, struct hci_dev,
1108 rpa_expired.work);
1109 struct hci_request req;
1110
1111 BT_DBG("");
1112
1113 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1114
1115 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
1116 hci_conn_num(hdev, LE_LINK) > 0)
1117 return;
1118
1119 /* The generation of a new RPA and programming it into the
1120 * controller happens in the enable_advertising() function.
1121 */
1122
1123 hci_req_init(&req, hdev);
1124
1125 disable_advertising(&req);
1126 enable_advertising(&req);
1127
1128 hci_req_run(&req, NULL);
1129 }
1130
1131 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1132 {
1133 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1134 return;
1135
1136 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1137 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1138
1139 /* Non-mgmt controlled devices get this bit set
1140 * implicitly so that pairing works for them, however
1141 * for mgmt we require user-space to explicitly enable
1142 * it
1143 */
1144 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1145 }
1146
/* Handler for MGMT_OP_READ_INFO: fills a mgmt_rp_read_info reply with the
 * controller's address, version, manufacturer, supported/current settings,
 * class of device and names, and sends it back to the requesting socket.
 * Always succeeds with status 0 (barring socket errors from cmd_complete).
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Snapshot all fields under the device lock so the reply is
	 * internally consistent.
	 */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1176
1177 static void mgmt_pending_free(struct pending_cmd *cmd)
1178 {
1179 sock_put(cmd->sk);
1180 kfree(cmd->param);
1181 kfree(cmd);
1182 }
1183
1184 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1185 struct hci_dev *hdev, void *data,
1186 u16 len)
1187 {
1188 struct pending_cmd *cmd;
1189
1190 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1191 if (!cmd)
1192 return NULL;
1193
1194 cmd->opcode = opcode;
1195 cmd->index = hdev->id;
1196
1197 cmd->param = kmalloc(len, GFP_KERNEL);
1198 if (!cmd->param) {
1199 kfree(cmd);
1200 return NULL;
1201 }
1202
1203 if (data)
1204 memcpy(cmd->param, data, len);
1205
1206 cmd->sk = sk;
1207 sock_hold(sk);
1208
1209 list_add(&cmd->list, &hdev->mgmt_pending);
1210
1211 return cmd;
1212 }
1213
1214 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1215 void (*cb)(struct pending_cmd *cmd,
1216 void *data),
1217 void *data)
1218 {
1219 struct pending_cmd *cmd, *tmp;
1220
1221 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1222 if (opcode > 0 && cmd->opcode != opcode)
1223 continue;
1224
1225 cb(cmd, data);
1226 }
1227 }
1228
/* Unlink a pending command from its controller's list and free it
 * (including the socket reference it holds).
 */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1234
1235 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1236 {
1237 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1238
1239 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1240 sizeof(settings));
1241 }
1242
1243 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1244 {
1245 BT_DBG("%s status 0x%02x", hdev->name, status);
1246
1247 if (hci_conn_count(hdev) == 0) {
1248 cancel_delayed_work(&hdev->power_off);
1249 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1250 }
1251 }
1252
/* Queue the HCI commands needed to abort whatever discovery activity is
 * currently in progress: an active inquiry or LE scan (FINDING), a pending
 * remote name resolution (RESOLVING), or background/passive LE scanning.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan in progress: stop the auto-disable timer
			 * before issuing the disable command ourselves.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Only one name request is outstanding at a time; cancel
		 * it if one is found, otherwise nothing needs to be queued.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1289
/* Build and run one HCI request that quiesces the controller prior to
 * power-off: disable page/inquiry scan and advertising, stop discovery,
 * and tear down every connection according to its state.  Returns the
 * result of hci_req_run() (-ENODATA if nothing needed to be sent);
 * clean_up_hci_complete() finishes the power-off when the request ends.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	/* Each connection is torn down with the command matching its
	 * state: established links are disconnected, outgoing attempts
	 * cancelled, incoming requests rejected.
	 */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1342
/* Handler for MGMT_OP_SET_POWERED.  Validates the requested mode, handles
 * the auto-power-off grace period, and either queues the power-on work or
 * starts a clean shutdown of the controller.  The final reply is sent from
 * the completion paths via the pending command added here.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was about to auto-power-off, cancel that; a
	 * power-on request then just needs the mgmt state notification.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: reply with current settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1408
1409 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1410 {
1411 __le32 ev;
1412
1413 ev = cpu_to_le32(get_current_settings(hdev));
1414
1415 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1416 }
1417
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp().  Initialized positionally elsewhere ({ NULL, hdev }),
 * so the field order must not change.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen; callbacks take a ref */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* presumably a status for callbacks - not
				 * used in this part of the file */
};
1423
1424 static void settings_rsp(struct pending_cmd *cmd, void *data)
1425 {
1426 struct cmd_lookup *match = data;
1427
1428 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1429
1430 list_del(&cmd->list);
1431
1432 if (match->sk == NULL) {
1433 match->sk = cmd->sk;
1434 sock_hold(match->sk);
1435 }
1436
1437 mgmt_pending_free(cmd);
1438 }
1439
1440 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1441 {
1442 u8 *status = data;
1443
1444 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1445 mgmt_pending_remove(cmd);
1446 }
1447
1448 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1449 {
1450 if (!lmp_bredr_capable(hdev))
1451 return MGMT_STATUS_NOT_SUPPORTED;
1452 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1453 return MGMT_STATUS_REJECTED;
1454 else
1455 return MGMT_STATUS_SUCCESS;
1456 }
1457
1458 static u8 mgmt_le_support(struct hci_dev *hdev)
1459 {
1460 if (!lmp_le_capable(hdev))
1461 return MGMT_STATUS_NOT_SUPPORTED;
1462 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1463 return MGMT_STATUS_REJECTED;
1464 else
1465 return MGMT_STATUS_SUCCESS;
1466 }
1467
/* Completion callback for the Set Discoverable HCI request.  Updates the
 * HCI_DISCOVERABLE flag to match the outcome, arms the discoverable
 * timeout if one was requested, replies to the pending command and emits
 * New Settings when the state actually changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* The limited bit was set optimistically in
		 * set_discoverable(); roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1524
/* Handler for MGMT_OP_SET_DISCOVERABLE (0x00 off, 0x01 general,
 * 0x02 limited, plus an optional timeout).  Validates the combination,
 * short-circuits the powered-off and no-HCI-needed cases, and otherwise
 * builds an HCI request (IAC + scan enable for BR/EDR, advertising data
 * for LE-only) finished by set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to also be connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: just flip the flag and report; no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1689
/* Queue page scan activity/type commands that switch between fast
 * connectable (interlaced scan, 160 ms interval) and the default
 * (standard scan, 1.28 s interval).  Commands are only queued when the
 * controller's cached values actually differ, so redundant calls are
 * cheap no-ops.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan parameters are only adjustable from 1.2 onwards. */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Compare against the host-order cached values converted to the
	 * wire's little-endian representation.
	 */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1724
/* Completion callback for the Set Connectable HCI request.  Syncs the
 * HCI_CONNECTABLE flag with the outcome, replies to the pending command
 * and, on an actual change, emits New Settings and re-evaluates
 * background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed) {
		new_settings(hdev, cmd->sk);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1764
1765 static int set_connectable_update_settings(struct hci_dev *hdev,
1766 struct sock *sk, u8 val)
1767 {
1768 bool changed = false;
1769 int err;
1770
1771 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1772 changed = true;
1773
1774 if (val) {
1775 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1776 } else {
1777 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1778 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1779 }
1780
1781 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1782 if (err < 0)
1783 return err;
1784
1785 if (changed)
1786 return new_settings(hdev, sk);
1787
1788 return 0;
1789 }
1790
/* Handler for MGMT_OP_SET_CONNECTABLE.  Validates the request, handles the
 * powered-off shortcut, then builds an HCI request: page scan changes for
 * BR/EDR, advertising-data updates for LE-only, plus fast-connectable and
 * advertising adjustments.  set_connectable_complete() finishes the job.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the flags, no HCI traffic needed. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable while discoverable would
			 * leave a stale timeout; cancel it.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so its flags reflect the new connectable
	 * state, but only when no LE connection is active.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* ENODATA: nothing needed to be sent, so settle the
		 * settings directly.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1885
1886 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1887 u16 len)
1888 {
1889 struct mgmt_mode *cp = data;
1890 bool changed;
1891 int err;
1892
1893 BT_DBG("request for %s", hdev->name);
1894
1895 if (cp->val != 0x00 && cp->val != 0x01)
1896 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1897 MGMT_STATUS_INVALID_PARAMS);
1898
1899 hci_dev_lock(hdev);
1900
1901 if (cp->val)
1902 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1903 else
1904 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1905
1906 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1907 if (err < 0)
1908 goto unlock;
1909
1910 if (changed)
1911 err = new_settings(hdev, sk);
1912
1913 unlock:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
/* Handler for MGMT_OP_SET_LINK_SECURITY.  Requires BR/EDR support; when
 * powered, translates the request into a Write Authentication Enable HCI
 * command whose completion replies via the pending command added here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just toggle the flag; the setting takes effect on
	 * the next power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: reply now. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1987
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing).  Requires BR/EDR
 * and SSP-capable hardware.  Disabling SSP also disables High Speed,
 * since HS depends on SSP.  When powered, the change goes through a
 * Write SSP Mode HCI command answered via the pending command.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flip flags only.  "changed" must become true if
	 * either the SSP or the dependent HS flag was cleared.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns SSP debug mode off when it was in
	 * use (cp->val is 0x00 here, which disables debug mode).
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2069
/* Handler for MGMT_OP_SET_HS (High Speed).  Requires BR/EDR, SSP-capable
 * hardware and SSP currently enabled.  Pure flag change - no HCI traffic -
 * except that disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling is only allowed while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2120
/* Completion callback for the Set LE HCI request.  On failure, fails all
 * pending Set LE commands with the translated status.  On success, replies
 * to them, broadcasts New Settings and - when LE ended up enabled -
 * refreshes advertising/scan response data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2160
/* Handler for MGMT_OP_SET_LE.  Toggling LE is only permitted on dual-mode
 * controllers.  When no HCI change is needed (powered off, or the host LE
 * support already matches) only the flags are updated; otherwise a Write
 * LE Host Supported request is run, finished by le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed: just flip the flags.  Disabling LE also
	 * takes advertising down with it.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Disabling LE: stop advertising first; the zeroed
		 * hci_cp then turns host LE support off.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2249
2250 /* This is a helper function to test for pending mgmt commands that can
2251 * cause CoD or EIR HCI commands. We can only allow one such pending
2252 * mgmt command at a time since otherwise we cannot easily track what
2253 * the current values are, will be, and based on that calculate if a new
2254 * HCI command needs to be sent and if yes with what value.
2255 */
2256 static bool pending_eir_or_class(struct hci_dev *hdev)
2257 {
2258 struct pending_cmd *cmd;
2259
2260 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2261 switch (cmd->opcode) {
2262 case MGMT_OP_ADD_UUID:
2263 case MGMT_OP_REMOVE_UUID:
2264 case MGMT_OP_SET_DEV_CLASS:
2265 case MGMT_OP_SET_POWERED:
2266 return true;
2267 }
2268 }
2269
2270 return false;
2271 }
2272
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order.  The first 12 bytes are the fixed part;
 * the last 4 bytes carry the 16/32-bit short UUID value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2277
2278 static u8 get_uuid_size(const u8 *uuid)
2279 {
2280 u32 val;
2281
2282 if (memcmp(uuid, bluetooth_base_uuid, 12))
2283 return 128;
2284
2285 val = get_unaligned_le32(&uuid[12]);
2286 if (val > 0xffff)
2287 return 32;
2288
2289 return 16;
2290 }
2291
2292 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2293 {
2294 struct pending_cmd *cmd;
2295
2296 hci_dev_lock(hdev);
2297
2298 cmd = mgmt_pending_find(mgmt_op, hdev);
2299 if (!cmd)
2300 goto unlock;
2301
2302 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2303 hdev->dev_class, 3);
2304
2305 mgmt_pending_remove(cmd);
2306
2307 unlock:
2308 hci_dev_unlock(hdev);
2309 }
2310
/* Completion hook for the Add UUID HCI request (class + EIR update). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2317
/* Handler for MGMT_OP_ADD_UUID.  Stores the UUID in the device list and
 * refreshes class of device and EIR.  If the HCI request produces no
 * commands (-ENODATA) the reply is sent immediately; otherwise it comes
 * from add_uuid_complete() via the pending command.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/CoD changing operation may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing needed to be sent: complete synchronously. */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2375
2376 static bool enable_service_cache(struct hci_dev *hdev)
2377 {
2378 if (!hdev_is_powered(hdev))
2379 return false;
2380
2381 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2382 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2383 CACHE_TIMEOUT);
2384 return true;
2385 }
2386
2387 return false;
2388 }
2389
/* HCI request completion callback for Remove UUID: forward the HCI
 * status to the shared class-command completion helper.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2396
/* Remove UUID mgmt command handler. The all-zero UUID acts as a
 * wildcard that clears every stored UUID; otherwise all entries that
 * match the given UUID are removed. Finishes by refreshing class of
 * device and EIR data via an HCI request.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID = "remove everything" wildcard */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache could be armed, the EIR/class
		 * refresh is deferred to the cache timeout; answer now.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: request was empty, respond immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Deferred response: remove_uuid_complete will answer later */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2474
/* HCI request completion callback for Set Device Class: forward the
 * HCI status to the shared class-command completion helper.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2481
/* Set Device Class mgmt command handler. Validates and stores the new
 * major/minor class and, if powered, pushes it to the controller
 * (flushing a pending service-cache update first).
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of device only exists for BR/EDR controllers */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are
	 * reserved in the Class of Device format and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: just remember the values and answer now */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock around cancel_delayed_work_sync() since
		 * the service cache work itself takes hdev's lock and
		 * waiting for it while holding the lock would deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: request was empty, respond immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Deferred response: set_class_complete will answer later */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2552
/* Load Link Keys mgmt command handler. Replaces the adapter's entire
 * BR/EDR link key store with the supplied list and updates the
 * keep-debug-keys setting. All parameters are validated before any
 * state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every key before clearing the existing store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Notify listeners only if the debug keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2634
2635 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2636 u8 addr_type, struct sock *skip_sk)
2637 {
2638 struct mgmt_ev_device_unpaired ev;
2639
2640 bacpy(&ev.addr.bdaddr, bdaddr);
2641 ev.addr.type = addr_type;
2642
2643 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2644 skip_sk);
2645 }
2646
2647 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2648 u16 len)
2649 {
2650 struct mgmt_cp_unpair_device *cp = data;
2651 struct mgmt_rp_unpair_device rp;
2652 struct hci_cp_disconnect dc;
2653 struct pending_cmd *cmd;
2654 struct hci_conn *conn;
2655 int err;
2656
2657 memset(&rp, 0, sizeof(rp));
2658 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2659 rp.addr.type = cp->addr.type;
2660
2661 if (!bdaddr_type_is_valid(cp->addr.type))
2662 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2663 MGMT_STATUS_INVALID_PARAMS,
2664 &rp, sizeof(rp));
2665
2666 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2667 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2668 MGMT_STATUS_INVALID_PARAMS,
2669 &rp, sizeof(rp));
2670
2671 hci_dev_lock(hdev);
2672
2673 if (!hdev_is_powered(hdev)) {
2674 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2675 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2676 goto unlock;
2677 }
2678
2679 if (cp->addr.type == BDADDR_BREDR) {
2680 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2681 } else {
2682 u8 addr_type;
2683
2684 if (cp->addr.type == BDADDR_LE_PUBLIC)
2685 addr_type = ADDR_LE_DEV_PUBLIC;
2686 else
2687 addr_type = ADDR_LE_DEV_RANDOM;
2688
2689 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2690
2691 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2692
2693 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2694 }
2695
2696 if (err < 0) {
2697 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2698 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2699 goto unlock;
2700 }
2701
2702 if (cp->disconnect) {
2703 if (cp->addr.type == BDADDR_BREDR)
2704 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2705 &cp->addr.bdaddr);
2706 else
2707 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2708 &cp->addr.bdaddr);
2709 } else {
2710 conn = NULL;
2711 }
2712
2713 if (!conn) {
2714 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2715 &rp, sizeof(rp));
2716 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2717 goto unlock;
2718 }
2719
2720 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2721 sizeof(*cp));
2722 if (!cmd) {
2723 err = -ENOMEM;
2724 goto unlock;
2725 }
2726
2727 dc.handle = cpu_to_le16(conn->handle);
2728 dc.reason = 0x13; /* Remote User Terminated Connection */
2729 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2730 if (err < 0)
2731 mgmt_pending_remove(cmd);
2732
2733 unlock:
2734 hci_dev_unlock(hdev);
2735 return err;
2736 }
2737
/* Disconnect mgmt command handler. Looks up the ACL or LE connection
 * for the given address and sends an HCI Disconnect; the final mgmt
 * response is deferred until the disconnect event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Response always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per adapter */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2802
2803 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2804 {
2805 switch (link_type) {
2806 case LE_LINK:
2807 switch (addr_type) {
2808 case ADDR_LE_DEV_PUBLIC:
2809 return BDADDR_LE_PUBLIC;
2810
2811 default:
2812 /* Fallback to LE Random address type */
2813 return BDADDR_LE_RANDOM;
2814 }
2815
2816 default:
2817 /* Fallback to BR/EDR type */
2818 return BDADDR_BREDR;
2819 }
2820 }
2821
2822 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2823 u16 data_len)
2824 {
2825 struct mgmt_rp_get_connections *rp;
2826 struct hci_conn *c;
2827 size_t rp_len;
2828 int err;
2829 u16 i;
2830
2831 BT_DBG("");
2832
2833 hci_dev_lock(hdev);
2834
2835 if (!hdev_is_powered(hdev)) {
2836 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2837 MGMT_STATUS_NOT_POWERED);
2838 goto unlock;
2839 }
2840
2841 i = 0;
2842 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2843 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2844 i++;
2845 }
2846
2847 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2848 rp = kmalloc(rp_len, GFP_KERNEL);
2849 if (!rp) {
2850 err = -ENOMEM;
2851 goto unlock;
2852 }
2853
2854 i = 0;
2855 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2856 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2857 continue;
2858 bacpy(&rp->addr[i].bdaddr, &c->dst);
2859 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2860 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2861 continue;
2862 i++;
2863 }
2864
2865 rp->conn_count = cpu_to_le16(i);
2866
2867 /* Recalculate length in case of filtered SCO connections, etc */
2868 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2869
2870 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2871 rp_len);
2872
2873 kfree(rp);
2874
2875 unlock:
2876 hci_dev_unlock(hdev);
2877 return err;
2878 }
2879
/* Queue a PIN Code Negative Reply towards the controller on behalf of
 * the given socket. Tracked as a pending command so the HCI completion
 * can be routed back; the entry is dropped again if the send fails.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2898
/* PIN Code Reply mgmt command handler. Forwards the user-supplied PIN
 * to the controller for the matching ACL connection. A high-security
 * connection requires a full 16-byte PIN; anything shorter is turned
 * into a negative reply automatically.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing is BR/EDR only, so look up the ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI side, then tell the
		 * caller the parameters were invalid (unless the
		 * negative reply itself failed).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2958
/* Set IO Capability mgmt command handler. Stores the adapter's IO
 * capability used for future pairing attempts; takes effect without
 * any HCI traffic.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				    MGMT_STATUS_INVALID_PARAMS, NULL, 0);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
			    0);
}
2982
2983 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2984 {
2985 struct hci_dev *hdev = conn->hdev;
2986 struct pending_cmd *cmd;
2987
2988 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2989 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2990 continue;
2991
2992 if (cmd->user_data != conn)
2993 continue;
2994
2995 return cmd;
2996 }
2997
2998 return NULL;
2999 }
3000
/* Finish a Pair Device command: send the result to the originating
 * socket, detach all pairing callbacks from the connection, drop the
 * reference held by the pending command and remove it.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the reference taken when the pairing was initiated */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3021
3022 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3023 {
3024 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3025 struct pending_cmd *cmd;
3026
3027 cmd = find_pairing(conn);
3028 if (cmd)
3029 pairing_complete(cmd, status);
3030 }
3031
3032 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3033 {
3034 struct pending_cmd *cmd;
3035
3036 BT_DBG("status %u", status);
3037
3038 cmd = find_pairing(conn);
3039 if (!cmd)
3040 BT_DBG("Unable to find a pending command");
3041 else
3042 pairing_complete(cmd, mgmt_status(status));
3043 }
3044
3045 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3046 {
3047 struct pending_cmd *cmd;
3048
3049 BT_DBG("status %u", status);
3050
3051 if (!status)
3052 return;
3053
3054 cmd = find_pairing(conn);
3055 if (!cmd)
3056 BT_DBG("Unable to find a pending command");
3057 else
3058 pairing_complete(cmd, mgmt_status(status));
3059 }
3060
/* Pair Device mgmt command handler. Establishes an ACL (BR/EDR) or LE
 * connection to the target and initiates pairing; completion is
 * reported asynchronously through the connection callbacks set up at
 * the end of this function.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Response always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		/* Request a connection with master = true role */
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT, true);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means some other pairing attempt
	 * already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure: pairing is effectively done */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3179
/* Cancel Pair Device mgmt command handler. Aborts the single pending
 * Pair Device command, provided the supplied address matches the
 * connection being paired.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* No pairing in progress is treated as invalid parameters */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* Address must match the connection being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3221
/* Common handler for all user pairing responses (PIN negative reply,
 * user confirm/passkey positive and negative replies). For LE the
 * response is routed to SMP; for BR/EDR the corresponding HCI command
 * is sent and the mgmt response deferred until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and are answered immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3289
/* PIN Code Negative Reply mgmt command handler: thin wrapper around
 * the shared user pairing response helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3301
/* User Confirm Reply mgmt command handler. Validates the exact payload
 * size (the command has no variable part) and delegates to the shared
 * user pairing response helper.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3317
/* User Confirm Negative Reply mgmt command handler: thin wrapper
 * around the shared user pairing response helper.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3329
/* User Passkey Reply mgmt command handler: forwards the user-entered
 * passkey through the shared user pairing response helper.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3341
/* User Passkey Negative Reply mgmt command handler: thin wrapper
 * around the shared user pairing response helper.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3353
3354 static void update_name(struct hci_request *req)
3355 {
3356 struct hci_dev *hdev = req->hdev;
3357 struct hci_cp_write_local_name cp;
3358
3359 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3360
3361 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3362 }
3363
/* HCI request completion callback for Set Local Name: answer the
 * pending mgmt command with the name that was requested (echoed from
 * the stored command parameters) or with the translated HCI error.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3391
/* Set Local Name mgmt command handler. Stores the new long and short
 * names and, when powered, pushes the name into EIR (BR/EDR) and scan
 * response data (LE) via an HCI request.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name, confirm, and emit the Local Name
	 * Changed event to all other mgmt sockets.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3460
/* Read Local OOB Data mgmt command handler. Requests the local OOB
 * data (extended variant if Secure Connections is enabled) from the
 * controller; the mgmt response is deferred until the HCI command
 * completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data comes from Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the extended command also returns the
	 * P-256 variants of the hash and randomizer.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3508
/* Add Remote OOB Data mgmt command handler. Stores hash/randomizer
 * values received out-of-band for a remote device. The payload size
 * distinguishes the legacy (192-bit only) and extended (192 + 256 bit)
 * variants of the command.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3556
3557 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3558 void *data, u16 len)
3559 {
3560 struct mgmt_cp_remove_remote_oob_data *cp = data;
3561 u8 status;
3562 int err;
3563
3564 BT_DBG("%s", hdev->name);
3565
3566 hci_dev_lock(hdev);
3567
3568 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3569 if (err < 0)
3570 status = MGMT_STATUS_INVALID_PARAMS;
3571 else
3572 status = MGMT_STATUS_SUCCESS;
3573
3574 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3575 status, &cp->addr, sizeof(cp->addr));
3576
3577 hci_dev_unlock(hdev);
3578 return err;
3579 }
3580
/* Fail a pending Start Discovery command: reset the discovery state
 * machine and respond to the originating socket with the translated
 * HCI status and the requested discovery type.
 *
 * Returns -ENOENT if no Start Discovery command was pending.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3601
3602 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3603 {
3604 unsigned long timeout = 0;
3605
3606 BT_DBG("status %d", status);
3607
3608 if (status) {
3609 hci_dev_lock(hdev);
3610 mgmt_start_discovery_failed(hdev, status);
3611 hci_dev_unlock(hdev);
3612 return;
3613 }
3614
3615 hci_dev_lock(hdev);
3616 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3617 hci_dev_unlock(hdev);
3618
3619 switch (hdev->discovery.type) {
3620 case DISCOV_TYPE_LE:
3621 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3622 break;
3623
3624 case DISCOV_TYPE_INTERLEAVED:
3625 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3626 break;
3627
3628 case DISCOV_TYPE_BREDR:
3629 break;
3630
3631 default:
3632 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3633 }
3634
3635 if (!timeout)
3636 return;
3637
3638 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3639 }
3640
/* MGMT_OP_START_DISCOVERY handler.
 *
 * Validates the request (controller powered, no periodic inquiry, discovery
 * currently stopped), records the requested discovery type and builds the
 * HCI request that kicks off either a BR/EDR inquiry, an LE active scan, or
 * both (interleaved). The operation completes asynchronously through
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry already driven by other means blocks us. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally requires BR/EDR. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active scanning while advertising is rejected. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3789
3790 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3791 {
3792 struct pending_cmd *cmd;
3793 int err;
3794
3795 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3796 if (!cmd)
3797 return -ENOENT;
3798
3799 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3800 &hdev->discovery.type, sizeof(hdev->discovery.type));
3801 mgmt_pending_remove(cmd);
3802
3803 return err;
3804 }
3805
3806 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3807 {
3808 BT_DBG("status %d", status);
3809
3810 hci_dev_lock(hdev);
3811
3812 if (status) {
3813 mgmt_stop_discovery_failed(hdev, status);
3814 goto unlock;
3815 }
3816
3817 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818
3819 unlock:
3820 hci_dev_unlock(hdev);
3821 }
3822
/* MGMT_OP_STOP_DISCOVERY handler.
 *
 * Aborts an ongoing discovery. Replies with MGMT_STATUS_REJECTED when no
 * discovery is active and MGMT_STATUS_INVALID_PARAMS when the requested
 * type does not match the one running. Otherwise queues the HCI commands
 * to stop discovery; completion is asynchronous via
 * stop_discovery_complete(), except when no HCI commands were needed
 * (-ENODATA), in which case the command completes immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3878
3879 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3880 u16 len)
3881 {
3882 struct mgmt_cp_confirm_name *cp = data;
3883 struct inquiry_entry *e;
3884 int err;
3885
3886 BT_DBG("%s", hdev->name);
3887
3888 hci_dev_lock(hdev);
3889
3890 if (!hci_discovery_active(hdev)) {
3891 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3892 MGMT_STATUS_FAILED, &cp->addr,
3893 sizeof(cp->addr));
3894 goto failed;
3895 }
3896
3897 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3898 if (!e) {
3899 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3900 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3901 sizeof(cp->addr));
3902 goto failed;
3903 }
3904
3905 if (cp->name_known) {
3906 e->name_state = NAME_KNOWN;
3907 list_del(&e->list);
3908 } else {
3909 e->name_state = NAME_NEEDED;
3910 hci_inquiry_cache_update_resolve(hdev, e);
3911 }
3912
3913 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3914 sizeof(cp->addr));
3915
3916 failed:
3917 hci_dev_unlock(hdev);
3918 return err;
3919 }
3920
3921 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3922 u16 len)
3923 {
3924 struct mgmt_cp_block_device *cp = data;
3925 u8 status;
3926 int err;
3927
3928 BT_DBG("%s", hdev->name);
3929
3930 if (!bdaddr_type_is_valid(cp->addr.type))
3931 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3932 MGMT_STATUS_INVALID_PARAMS,
3933 &cp->addr, sizeof(cp->addr));
3934
3935 hci_dev_lock(hdev);
3936
3937 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3938 if (err < 0) {
3939 status = MGMT_STATUS_FAILED;
3940 goto done;
3941 }
3942
3943 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3944 sk);
3945 status = MGMT_STATUS_SUCCESS;
3946
3947 done:
3948 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3949 &cp->addr, sizeof(cp->addr));
3950
3951 hci_dev_unlock(hdev);
3952
3953 return err;
3954 }
3955
3956 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3957 u16 len)
3958 {
3959 struct mgmt_cp_unblock_device *cp = data;
3960 u8 status;
3961 int err;
3962
3963 BT_DBG("%s", hdev->name);
3964
3965 if (!bdaddr_type_is_valid(cp->addr.type))
3966 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3967 MGMT_STATUS_INVALID_PARAMS,
3968 &cp->addr, sizeof(cp->addr));
3969
3970 hci_dev_lock(hdev);
3971
3972 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3973 if (err < 0) {
3974 status = MGMT_STATUS_INVALID_PARAMS;
3975 goto done;
3976 }
3977
3978 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3979 sk);
3980 status = MGMT_STATUS_SUCCESS;
3981
3982 done:
3983 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3984 &cp->addr, sizeof(cp->addr));
3985
3986 hci_dev_unlock(hdev);
3987
3988 return err;
3989 }
3990
3991 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3992 u16 len)
3993 {
3994 struct mgmt_cp_set_device_id *cp = data;
3995 struct hci_request req;
3996 int err;
3997 __u16 source;
3998
3999 BT_DBG("%s", hdev->name);
4000
4001 source = __le16_to_cpu(cp->source);
4002
4003 if (source > 0x0002)
4004 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4005 MGMT_STATUS_INVALID_PARAMS);
4006
4007 hci_dev_lock(hdev);
4008
4009 hdev->devid_source = source;
4010 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4011 hdev->devid_product = __le16_to_cpu(cp->product);
4012 hdev->devid_version = __le16_to_cpu(cp->version);
4013
4014 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4015
4016 hci_req_init(&req, hdev);
4017 update_eir(&req);
4018 hci_req_run(&req, NULL);
4019
4020 hci_dev_unlock(hdev);
4021
4022 return err;
4023 }
4024
4025 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4026 {
4027 struct cmd_lookup match = { NULL, hdev };
4028
4029 if (status) {
4030 u8 mgmt_err = mgmt_status(status);
4031
4032 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4033 cmd_status_rsp, &mgmt_err);
4034 return;
4035 }
4036
4037 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4038 &match);
4039
4040 new_settings(hdev, match.sk);
4041
4042 if (match.sk)
4043 sock_put(match.sk);
4044 }
4045
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * Toggles LE advertising. When the controller is unpowered, the requested
 * value equals the current one, or an LE connection exists, only the
 * HCI_ADVERTISING flag is updated and the response is sent without HCI
 * traffic. Otherwise the enable/disable advertising commands are queued
 * and set_advertising_complete() finishes the operation.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE toggle may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4123
4124 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4125 void *data, u16 len)
4126 {
4127 struct mgmt_cp_set_static_address *cp = data;
4128 int err;
4129
4130 BT_DBG("%s", hdev->name);
4131
4132 if (!lmp_le_capable(hdev))
4133 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4134 MGMT_STATUS_NOT_SUPPORTED);
4135
4136 if (hdev_is_powered(hdev))
4137 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4138 MGMT_STATUS_REJECTED);
4139
4140 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4141 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4142 return cmd_status(sk, hdev->id,
4143 MGMT_OP_SET_STATIC_ADDRESS,
4144 MGMT_STATUS_INVALID_PARAMS);
4145
4146 /* Two most significant bits shall be set */
4147 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4148 return cmd_status(sk, hdev->id,
4149 MGMT_OP_SET_STATIC_ADDRESS,
4150 MGMT_STATUS_INVALID_PARAMS);
4151 }
4152
4153 hci_dev_lock(hdev);
4154
4155 bacpy(&hdev->static_addr, &cp->bdaddr);
4156
4157 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4158
4159 hci_dev_unlock(hdev);
4160
4161 return err;
4162 }
4163
4164 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4165 void *data, u16 len)
4166 {
4167 struct mgmt_cp_set_scan_params *cp = data;
4168 __u16 interval, window;
4169 int err;
4170
4171 BT_DBG("%s", hdev->name);
4172
4173 if (!lmp_le_capable(hdev))
4174 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4175 MGMT_STATUS_NOT_SUPPORTED);
4176
4177 interval = __le16_to_cpu(cp->interval);
4178
4179 if (interval < 0x0004 || interval > 0x4000)
4180 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4181 MGMT_STATUS_INVALID_PARAMS);
4182
4183 window = __le16_to_cpu(cp->window);
4184
4185 if (window < 0x0004 || window > 0x4000)
4186 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4187 MGMT_STATUS_INVALID_PARAMS);
4188
4189 if (window > interval)
4190 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4191 MGMT_STATUS_INVALID_PARAMS);
4192
4193 hci_dev_lock(hdev);
4194
4195 hdev->le_scan_interval = interval;
4196 hdev->le_scan_window = window;
4197
4198 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4199
4200 /* If background scan is running, restart it so new parameters are
4201 * loaded.
4202 */
4203 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4204 hdev->discovery.state == DISCOVERY_STOPPED) {
4205 struct hci_request req;
4206
4207 hci_req_init(&req, hdev);
4208
4209 hci_req_add_le_scan_disable(&req);
4210 hci_req_add_le_passive_scan(&req);
4211
4212 hci_req_run(&req, NULL);
4213 }
4214
4215 hci_dev_unlock(hdev);
4216
4217 return err;
4218 }
4219
4220 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4221 {
4222 struct pending_cmd *cmd;
4223
4224 BT_DBG("status 0x%02x", status);
4225
4226 hci_dev_lock(hdev);
4227
4228 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4229 if (!cmd)
4230 goto unlock;
4231
4232 if (status) {
4233 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4234 mgmt_status(status));
4235 } else {
4236 struct mgmt_mode *cp = cmd->param;
4237
4238 if (cp->val)
4239 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4240 else
4241 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4242
4243 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4244 new_settings(hdev, cmd->sk);
4245 }
4246
4247 mgmt_pending_remove(cmd);
4248
4249 unlock:
4250 hci_dev_unlock(hdev);
4251 }
4252
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Toggles fast-connectable page scan parameters. Requires BR/EDR to be
 * enabled on a controller of at least Bluetooth 1.2, the controller to be
 * powered and connectable, and no other fast-connectable change to be in
 * flight. Completes asynchronously via fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested - just confirm the current settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4317
4318 static void set_bredr_scan(struct hci_request *req)
4319 {
4320 struct hci_dev *hdev = req->hdev;
4321 u8 scan = 0;
4322
4323 /* Ensure that fast connectable is disabled. This function will
4324 * not do anything if the page scan parameters are already what
4325 * they should be.
4326 */
4327 write_fast_connectable(req, false);
4328
4329 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4330 scan |= SCAN_PAGE;
4331 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4332 scan |= SCAN_INQUIRY;
4333
4334 if (scan)
4335 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4336 }
4337
4338 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4339 {
4340 struct pending_cmd *cmd;
4341
4342 BT_DBG("status 0x%02x", status);
4343
4344 hci_dev_lock(hdev);
4345
4346 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4347 if (!cmd)
4348 goto unlock;
4349
4350 if (status) {
4351 u8 mgmt_err = mgmt_status(status);
4352
4353 /* We need to restore the flag if related HCI commands
4354 * failed.
4355 */
4356 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4357
4358 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4359 } else {
4360 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4361 new_settings(hdev, cmd->sk);
4362 }
4363
4364 mgmt_pending_remove(cmd);
4365
4366 unlock:
4367 hci_dev_unlock(hdev);
4368 }
4369
/* MGMT_OP_SET_BREDR handler.
 *
 * Enables or disables BR/EDR on a dual-mode (BR/EDR + LE) controller. LE
 * must currently be enabled. While powered off only flags are updated;
 * while powered on, disabling is rejected and enabling queues the scan
 * and advertising-data updates, completing via set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested - just confirm the current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all flags that only make
		 * sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4459
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * Enables (0x01), enables SC-only mode (0x02) or disables (0x00) Secure
 * Connections support. While powered off only the flags are updated; while
 * powered on the Write Secure Connections Host Support command is sent to
 * the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC is a debugfs override for controllers without
	 * native SC support.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change requested - just confirm the current settings. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* NOTE(review): HCI_SC_ONLY is updated before the controller has
	 * confirmed the WRITE_SC_SUPPORT command - verify this ordering
	 * is intended.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4547
4548 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4549 void *data, u16 len)
4550 {
4551 struct mgmt_mode *cp = data;
4552 bool changed, use_changed;
4553 int err;
4554
4555 BT_DBG("request for %s", hdev->name);
4556
4557 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4558 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4559 MGMT_STATUS_INVALID_PARAMS);
4560
4561 hci_dev_lock(hdev);
4562
4563 if (cp->val)
4564 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4565 &hdev->dev_flags);
4566 else
4567 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4568 &hdev->dev_flags);
4569
4570 if (cp->val == 0x02)
4571 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4572 &hdev->dev_flags);
4573 else
4574 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4575 &hdev->dev_flags);
4576
4577 if (hdev_is_powered(hdev) && use_changed &&
4578 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4579 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4580 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4581 sizeof(mode), &mode);
4582 }
4583
4584 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4585 if (err < 0)
4586 goto unlock;
4587
4588 if (changed)
4589 err = new_settings(hdev, sk);
4590
4591 unlock:
4592 hci_dev_unlock(hdev);
4593 return err;
4594 }
4595
4596 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4597 u16 len)
4598 {
4599 struct mgmt_cp_set_privacy *cp = cp_data;
4600 bool changed;
4601 int err;
4602
4603 BT_DBG("request for %s", hdev->name);
4604
4605 if (!lmp_le_capable(hdev))
4606 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4607 MGMT_STATUS_NOT_SUPPORTED);
4608
4609 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4610 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4611 MGMT_STATUS_INVALID_PARAMS);
4612
4613 if (hdev_is_powered(hdev))
4614 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4615 MGMT_STATUS_REJECTED);
4616
4617 hci_dev_lock(hdev);
4618
4619 /* If user space supports this command it is also expected to
4620 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4621 */
4622 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4623
4624 if (cp->privacy) {
4625 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4626 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4627 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4628 } else {
4629 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4630 memset(hdev->irk, 0, sizeof(hdev->irk));
4631 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4632 }
4633
4634 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4635 if (err < 0)
4636 goto unlock;
4637
4638 if (changed)
4639 err = new_settings(hdev, sk);
4640
4641 unlock:
4642 hci_dev_unlock(hdev);
4643 return err;
4644 }
4645
4646 static bool irk_is_valid(struct mgmt_irk_info *irk)
4647 {
4648 switch (irk->addr.type) {
4649 case BDADDR_LE_PUBLIC:
4650 return true;
4651
4652 case BDADDR_LE_RANDOM:
4653 /* Two most significant bits shall be set */
4654 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4655 return false;
4656 return true;
4657 }
4658
4659 return false;
4660 }
4661
4662 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4663 u16 len)
4664 {
4665 struct mgmt_cp_load_irks *cp = cp_data;
4666 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4667 sizeof(struct mgmt_irk_info));
4668 u16 irk_count, expected_len;
4669 int i, err;
4670
4671 BT_DBG("request for %s", hdev->name);
4672
4673 if (!lmp_le_capable(hdev))
4674 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4675 MGMT_STATUS_NOT_SUPPORTED);
4676
4677 irk_count = __le16_to_cpu(cp->irk_count);
4678 if (irk_count > max_irk_count) {
4679 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4680 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4681 MGMT_STATUS_INVALID_PARAMS);
4682 }
4683
4684 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4685 if (expected_len != len) {
4686 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4687 expected_len, len);
4688 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4689 MGMT_STATUS_INVALID_PARAMS);
4690 }
4691
4692 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4693
4694 for (i = 0; i < irk_count; i++) {
4695 struct mgmt_irk_info *key = &cp->irks[i];
4696
4697 if (!irk_is_valid(key))
4698 return cmd_status(sk, hdev->id,
4699 MGMT_OP_LOAD_IRKS,
4700 MGMT_STATUS_INVALID_PARAMS);
4701 }
4702
4703 hci_dev_lock(hdev);
4704
4705 hci_smp_irks_clear(hdev);
4706
4707 for (i = 0; i < irk_count; i++) {
4708 struct mgmt_irk_info *irk = &cp->irks[i];
4709 u8 addr_type;
4710
4711 if (irk->addr.type == BDADDR_LE_PUBLIC)
4712 addr_type = ADDR_LE_DEV_PUBLIC;
4713 else
4714 addr_type = ADDR_LE_DEV_RANDOM;
4715
4716 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4717 BDADDR_ANY);
4718 }
4719
4720 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4721
4722 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4723
4724 hci_dev_unlock(hdev);
4725
4726 return err;
4727 }
4728
4729 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4730 {
4731 if (key->master != 0x00 && key->master != 0x01)
4732 return false;
4733
4734 switch (key->addr.type) {
4735 case BDADDR_LE_PUBLIC:
4736 return true;
4737
4738 case BDADDR_LE_RANDOM:
4739 /* Two most significant bits shall be set */
4740 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4741 return false;
4742 return true;
4743 }
4744
4745 return false;
4746 }
4747
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the stored SMP long term keys with the supplied list. The
 * packet length is checked against the declared key count and every entry
 * is validated before the existing key list is cleared. Entries with an
 * unrecognized key type are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all keys before touching the stored list. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* The master flag selects the LTK role. */
		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Unknown key types are skipped, not rejected. */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4831
/* Match context handed to get_conn_info_complete() while walking the
 * pending MGMT_OP_GET_CONN_INFO commands, so only the command for the
 * right connection is answered.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the reply belongs to */
	bool valid_tx_power;	/* conn->tx_power/max_tx_power are usable */
	u8 mgmt_status;		/* status to report back to userspace */
};
4837
/* Per-command callback invoked from mgmt_pending_foreach(): replies to
 * and removes the pending Get Connection Info command whose user_data
 * matches the connection in *data (a struct cmd_conn_lookup).
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer the command for the connection being refreshed */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4873
/* HCI request completion handler for the Read RSSI / Read TX Power
 * request built in get_conn_info(): determines which connection the
 * request was for and answers the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4931
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an active
 * connection. Values newer than the randomized cache age are answered
 * from the cache; otherwise a Read RSSI (and, as needed, Read TX Power)
 * request is issued and the reply is sent asynchronously from
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI must be first in the request; the completion
		 * handler relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the refresh completes;
		 * get_conn_info_complete() drops this reference.
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5043
/* HCI request completion handler for get_clock_info(): forwards the
 * local (and optionally piconet) clock values to the pending mgmt
 * command and drops the connection reference taken when it was queued.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was requested; recover that connection by handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and the error status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5096
5097 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5098 u16 len)
5099 {
5100 struct mgmt_cp_get_clock_info *cp = data;
5101 struct mgmt_rp_get_clock_info rp;
5102 struct hci_cp_read_clock hci_cp;
5103 struct pending_cmd *cmd;
5104 struct hci_request req;
5105 struct hci_conn *conn;
5106 int err;
5107
5108 BT_DBG("%s", hdev->name);
5109
5110 memset(&rp, 0, sizeof(rp));
5111 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5112 rp.addr.type = cp->addr.type;
5113
5114 if (cp->addr.type != BDADDR_BREDR)
5115 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5116 MGMT_STATUS_INVALID_PARAMS,
5117 &rp, sizeof(rp));
5118
5119 hci_dev_lock(hdev);
5120
5121 if (!hdev_is_powered(hdev)) {
5122 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5123 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5124 goto unlock;
5125 }
5126
5127 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5128 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5129 &cp->addr.bdaddr);
5130 if (!conn || conn->state != BT_CONNECTED) {
5131 err = cmd_complete(sk, hdev->id,
5132 MGMT_OP_GET_CLOCK_INFO,
5133 MGMT_STATUS_NOT_CONNECTED,
5134 &rp, sizeof(rp));
5135 goto unlock;
5136 }
5137 } else {
5138 conn = NULL;
5139 }
5140
5141 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5142 if (!cmd) {
5143 err = -ENOMEM;
5144 goto unlock;
5145 }
5146
5147 hci_req_init(&req, hdev);
5148
5149 memset(&hci_cp, 0, sizeof(hci_cp));
5150 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5151
5152 if (conn) {
5153 hci_conn_hold(conn);
5154 cmd->user_data = conn;
5155
5156 hci_cp.handle = cpu_to_le16(conn->handle);
5157 hci_cp.which = 0x01; /* Piconet clock */
5158 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5159 }
5160
5161 err = hci_req_run(&req, get_clock_info_complete);
5162 if (err < 0)
5163 mgmt_pending_remove(cmd);
5164
5165 unlock:
5166 hci_dev_unlock(hdev);
5167 return err;
5168 }
5169
5170 static void device_added(struct sock *sk, struct hci_dev *hdev,
5171 bdaddr_t *bdaddr, u8 type, u8 action)
5172 {
5173 struct mgmt_ev_device_added ev;
5174
5175 bacpy(&ev.addr.bdaddr, bdaddr);
5176 ev.addr.type = type;
5177 ev.action = action;
5178
5179 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5180 }
5181
/* Handle MGMT_OP_ADD_DEVICE: register an LE device for background
 * handling. action 0x01 requests automatic connection, action 0x00
 * requests reporting only.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only real LE addresses are accepted */
	if (!bdaddr_type_is_le(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5234
5235 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5236 bdaddr_t *bdaddr, u8 type)
5237 {
5238 struct mgmt_ev_device_removed ev;
5239
5240 bacpy(&ev.addr.bdaddr, bdaddr);
5241 ev.addr.type = type;
5242
5243 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5244 }
5245
/* Handle MGMT_OP_REMOVE_DEVICE: remove a single LE device's connection
 * parameters, or all of them when BDADDR_ANY is given. Entries in
 * HCI_AUTO_CONN_DISABLED state are never removed here.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries cannot be removed individually either */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5325
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list from the (variable-length) command payload.
 * Individual invalid entries are skipped rather than failing the whole
 * request.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count that still fits in the 16-bit length field */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match param_count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5410
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the controller's
 * configuration is provided externally. Only allowed while the device
 * is powered off and the controller declares the EXTERNAL_CONFIG quirk.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The UNCONFIGURED flag no longer matching is_configured() means
	 * the toggle changed the configuration state, so the index has to
	 * be removed and re-announced in its new state.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5468
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: program a public BD_ADDR on a
 * controller that supports the set_bdaddr driver callback. Only
 * allowed while the device is powered off.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* If the address made the controller fully configured, remove the
	 * unconfigured index and bring the device up as configured.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5520
/* Dispatch table for mgmt commands, indexed by opcode - the entry order
 * must therefore match the MGMT_OP_* values. Entries with var_len set
 * accept payloads of at least data_len bytes (trailing variable-size
 * data); all others require an exact size match (see mgmt_control()).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5586
/* Entry point for messages arriving on a mgmt control socket: parse the
 * mgmt header, validate the target index and payload length, then
 * dispatch to the handler from mgmt_handlers[]. Returns the number of
 * consumed bytes on success or a negative error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must account for the whole payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config or claimed by a user
		 * channel are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-device commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* var_len handlers accept len >= data_len, others need an exact
	 * match.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5696
5697 void mgmt_index_added(struct hci_dev *hdev)
5698 {
5699 if (hdev->dev_type != HCI_BREDR)
5700 return;
5701
5702 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5703 return;
5704
5705 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5706 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5707 else
5708 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5709 }
5710
5711 void mgmt_index_removed(struct hci_dev *hdev)
5712 {
5713 u8 status = MGMT_STATUS_INVALID_INDEX;
5714
5715 if (hdev->dev_type != HCI_BREDR)
5716 return;
5717
5718 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5719 return;
5720
5721 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5722
5723 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5724 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5725 else
5726 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5727 }
5728
5729 /* This function requires the caller holds hdev->lock */
5730 static void restart_le_actions(struct hci_dev *hdev)
5731 {
5732 struct hci_conn_params *p;
5733
5734 list_for_each_entry(p, &hdev->le_conn_params, list) {
5735 /* Needed for AUTO_OFF case where might not "really"
5736 * have been powered off.
5737 */
5738 list_del_init(&p->action);
5739
5740 switch (p->auto_connect) {
5741 case HCI_AUTO_CONN_ALWAYS:
5742 list_add(&p->action, &hdev->pend_le_conns);
5743 break;
5744 case HCI_AUTO_CONN_REPORT:
5745 list_add(&p->action, &hdev->pend_le_reports);
5746 break;
5747 default:
5748 break;
5749 }
5750 }
5751
5752 hci_update_background_scan(hdev);
5753 }
5754
/* HCI request completion handler for powered_update_hci(): restores LE
 * auto-connect actions, answers pending Set Powered commands and emits
 * a New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp may have stashed a socket reference in match.sk */
	if (match.sk)
		sock_put(match.sk);
}
5774
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power on: SSP mode, LE host
 * support, advertising data, authentication and BR/EDR scan/class/name.
 * Returns the hci_req_run() result; powered_complete() runs on
 * completion.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Only write auth enable if it differs from the current state */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5834
/* Notify mgmt about a power state change. On power-on the settings
 * update request is issued (its completion handles notification); on
 * power-off pending commands are failed and a zero class-of-device
 * change plus New Settings are emitted.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If the update request was queued, powered_complete()
		 * takes care of responses and events.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report a cleared class of device unless it was already zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5869
5870 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5871 {
5872 struct pending_cmd *cmd;
5873 u8 status;
5874
5875 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5876 if (!cmd)
5877 return;
5878
5879 if (err == -ERFKILL)
5880 status = MGMT_STATUS_RFKILLED;
5881 else
5882 status = MGMT_STATUS_FAILED;
5883
5884 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5885
5886 mgmt_pending_remove(cmd);
5887 }
5888
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore plain page scan for BR/EDR and refresh class and
 * advertising data so they no longer advertise discoverability.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Keep page scan enabled but drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	/* Let user space know the discoverable setting changed */
	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5919
5920 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5921 {
5922 bool changed;
5923
5924 /* Nothing needed here if there's a pending command since that
5925 * commands request completion callback takes care of everything
5926 * necessary.
5927 */
5928 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5929 return;
5930
5931 /* Powering off may clear the scan mode - don't let that interfere */
5932 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5933 return;
5934
5935 if (discoverable) {
5936 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5937 } else {
5938 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5939 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5940 }
5941
5942 if (changed) {
5943 struct hci_request req;
5944
5945 /* In case this change in discoverable was triggered by
5946 * a disabling of connectable there could be a need to
5947 * update the advertising flags.
5948 */
5949 hci_req_init(&req, hdev);
5950 update_adv_data(&req);
5951 hci_req_run(&req, NULL);
5952
5953 new_settings(hdev, NULL);
5954 }
5955 }
5956
5957 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5958 {
5959 bool changed;
5960
5961 /* Nothing needed here if there's a pending command since that
5962 * commands request completion callback takes care of everything
5963 * necessary.
5964 */
5965 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5966 return;
5967
5968 /* Powering off may clear the scan mode - don't let that interfere */
5969 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5970 return;
5971
5972 if (connectable)
5973 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5974 else
5975 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5976
5977 if (changed)
5978 new_settings(hdev, NULL);
5979 }
5980
/* Mirror a controller-side advertising state change into the
 * HCI_ADVERTISING mgmt flag. No event is emitted here.
 */
void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
{
	/* Powering off may stop advertising - don't let that interfere */
	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (advertising)
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
}
5992
/* Report a failed Write Scan Enable command back to whichever mgmt
 * commands were waiting for it: page scan maps to SET_CONNECTABLE,
 * inquiry scan to SET_DISCOVERABLE.
 */
void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
{
	u8 mgmt_err = mgmt_status(status);

	if (scan & SCAN_PAGE)
		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
				     cmd_status_rsp, &mgmt_err);

	if (scan & SCAN_INQUIRY)
		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
				     cmd_status_rsp, &mgmt_err);
}
6005
/* Broadcast a New Link Key event for a freshly created BR/EDR link
 * key; store_hint tells user space whether to persist it.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
6022
6023 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6024 {
6025 if (ltk->authenticated)
6026 return MGMT_LTK_AUTHENTICATED;
6027
6028 return MGMT_LTK_UNAUTHENTICATED;
6029 }
6030
6031 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6032 {
6033 struct mgmt_ev_new_long_term_key ev;
6034
6035 memset(&ev, 0, sizeof(ev));
6036
6037 /* Devices using resolvable or non-resolvable random addresses
6038 * without providing an indentity resolving key don't require
6039 * to store long term keys. Their addresses will change the
6040 * next time around.
6041 *
6042 * Only when a remote device provides an identity address
6043 * make sure the long term key is stored. If the remote
6044 * identity is known, the long term keys are internally
6045 * mapped to the identity address. So allow static random
6046 * and public addresses here.
6047 */
6048 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6049 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6050 ev.store_hint = 0x00;
6051 else
6052 ev.store_hint = persistent;
6053
6054 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6055 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6056 ev.key.type = mgmt_ltk_type(key);
6057 ev.key.enc_size = key->enc_size;
6058 ev.key.ediv = key->ediv;
6059 ev.key.rand = key->rand;
6060
6061 if (key->type == SMP_LTK)
6062 ev.key.master = 1;
6063
6064 memcpy(ev.key.val, key->val, sizeof(key->val));
6065
6066 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6067 }
6068
6069 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6070 {
6071 struct mgmt_ev_new_irk ev;
6072
6073 memset(&ev, 0, sizeof(ev));
6074
6075 /* For identity resolving keys from devices that are already
6076 * using a public address or static random address, do not
6077 * ask for storing this key. The identity resolving key really
6078 * is only mandatory for devices using resovlable random
6079 * addresses.
6080 *
6081 * Storing all identity resolving keys has the downside that
6082 * they will be also loaded on next boot of they system. More
6083 * identity resolving keys, means more time during scanning is
6084 * needed to actually resolve these addresses.
6085 */
6086 if (bacmp(&irk->rpa, BDADDR_ANY))
6087 ev.store_hint = 0x01;
6088 else
6089 ev.store_hint = 0x00;
6090
6091 bacpy(&ev.rpa, &irk->rpa);
6092 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6093 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6094 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6095
6096 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6097 }
6098
6099 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6100 bool persistent)
6101 {
6102 struct mgmt_ev_new_csrk ev;
6103
6104 memset(&ev, 0, sizeof(ev));
6105
6106 /* Devices using resolvable or non-resolvable random addresses
6107 * without providing an indentity resolving key don't require
6108 * to store signature resolving keys. Their addresses will change
6109 * the next time around.
6110 *
6111 * Only when a remote device provides an identity address
6112 * make sure the signature resolving key is stored. So allow
6113 * static random and public addresses here.
6114 */
6115 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6116 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6117 ev.store_hint = 0x00;
6118 else
6119 ev.store_hint = persistent;
6120
6121 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6122 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6123 ev.key.master = csrk->master;
6124 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6125
6126 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6127 }
6128
/* Tell user space about connection parameters suggested by a remote
 * LE device so it can decide whether to store them (store_hint).
 * All 16-bit values are converted to little endian for the wire.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only forwarded for identity addresses */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
6149
6150 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6151 u8 data_len)
6152 {
6153 eir[eir_len++] = sizeof(type) + data_len;
6154 eir[eir_len++] = type;
6155 memcpy(&eir[eir_len], data, data_len);
6156 eir_len += data_len;
6157
6158 return eir_len;
6159 }
6160
/* Emit a Device Connected event, carrying the remote name and class
 * of device (when known) as EIR-encoded fields after the fixed part.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device if it is not all zeros */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6187
/* Complete one pending MGMT_OP_DISCONNECT command and hand its socket
 * (with a held reference) back through *data so the caller can skip
 * it when broadcasting the Device Disconnected event.
 */
static void disconnect_rsp(struct pending_cmd *cmd, void *data)
{
	struct mgmt_cp_disconnect *cp = cmd->param;
	struct sock **sk = data;
	struct mgmt_rp_disconnect rp;

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
		     sizeof(rp));

	/* The caller is responsible for dropping this reference */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
6205
/* Complete one pending unpair command (data is the hci_dev) and emit
 * the Device Unpaired event for its address.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6222
/* Emit a Device Disconnected event and settle related pending mgmt
 * commands. When the controller is being powered down and this was
 * the last connection, the deferred power-off work is run right away.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Don't signal disconnections mgmt never reported as connected */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE link disconnections are reported to mgmt */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Answer any pending DISCONNECT command first; its socket is
	 * then excluded from the event broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6264
6265 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6266 u8 link_type, u8 addr_type, u8 status)
6267 {
6268 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6269 struct mgmt_cp_disconnect *cp;
6270 struct mgmt_rp_disconnect rp;
6271 struct pending_cmd *cmd;
6272
6273 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6274 hdev);
6275
6276 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6277 if (!cmd)
6278 return;
6279
6280 cp = cmd->param;
6281
6282 if (bacmp(bdaddr, &cp->addr.bdaddr))
6283 return;
6284
6285 if (cp->addr.type != bdaddr_type)
6286 return;
6287
6288 bacpy(&rp.addr.bdaddr, bdaddr);
6289 rp.addr.type = bdaddr_type;
6290
6291 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6292 mgmt_status(status), &rp, sizeof(rp));
6293
6294 mgmt_pending_remove(cmd);
6295 }
6296
/* Emit a Connect Failed event. If the controller is being powered
 * down and this attempt held the last connection, run the deferred
 * power-off work right away.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6322
/* Ask user space for a PIN code; secure indicates a 16-digit PIN is
 * required. PIN pairing only exists on BR/EDR.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
6333
/* Complete a pending PIN Code Reply command with the HCI status */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6352
/* Complete a pending PIN Code Negative Reply command with the HCI
 * status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6371
/* Ask user space to confirm a pairing passkey value; confirm_hint
 * distinguishes plain yes/no confirmation from value comparison.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
6388
/* Ask user space to enter a passkey for the given remote device */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
6402
/* Shared completion helper for the four user pairing response
 * commands (confirm/passkey, positive/negative). Returns -ENOENT if
 * no matching command is pending, otherwise the cmd_complete result.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
6424
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6431
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6439
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6446
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6454
/* Show user space the passkey being used for pairing; entered counts
 * the digits the remote side has typed so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	BT_DBG("%s", hdev->name);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
6470
/* Emit an Authentication Failed event with the mapped mgmt status */
void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, u8 status)
{
	struct mgmt_ev_auth_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6482
/* Handle completion of Write Auth Enable: answer pending
 * SET_LINK_SECURITY commands and sync the HCI_LINK_SECURITY flag
 * with the controller's HCI_AUTH state.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag and note
	 * whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6511
/* Queue a Write EIR command that clears the extended inquiry response
 * data, and wipe the cached copy in hdev->eir. No-op on controllers
 * without extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
6526
/* Handle completion of Write Simple Pairing Mode: answer pending
 * SET_SSP commands, sync the HCI_SSP_ENABLED flag (HS depends on SSP
 * and is cleared together with it) and update or clear the EIR data
 * accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed while the flag was set, roll the
		 * flag (and dependent HS) back and signal the change.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* HS requires SSP: if SSP was already off, a set HS
		 * flag alone still counts as a settings change.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6579
/* Handle completion of Write Secure Connections Support: answer
 * pending SET_SECURE_CONN commands and sync the HCI_SC_ENABLED flag
 * (HCI_SC_ONLY is cleared whenever SC is disabled or failed).
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed while the flag was set, roll it
		 * back (plus SC-only mode) and signal the change.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6616
6617 static void sk_lookup(struct pending_cmd *cmd, void *data)
6618 {
6619 struct cmd_lookup *match = data;
6620
6621 if (match->sk == NULL) {
6622 match->sk = cmd->sk;
6623 sock_hold(match->sk);
6624 }
6625 }
6626
/* Handle completion of a class of device update. On success the new
 * class is broadcast; the event is sent to everyone except the socket
 * that triggered the change (found via the pending commands).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6643
/* Handle completion of a local name change. Emits a Local Name
 * Changed event unless the change was part of powering on, in which
 * case user space is not signalled.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command, so the controller initiated the
		 * change - cache the new name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6670
6671 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6672 u8 *randomizer192, u8 *hash256,
6673 u8 *randomizer256, u8 status)
6674 {
6675 struct pending_cmd *cmd;
6676
6677 BT_DBG("%s status %u", hdev->name, status);
6678
6679 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6680 if (!cmd)
6681 return;
6682
6683 if (status) {
6684 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6685 mgmt_status(status));
6686 } else {
6687 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6688 hash256 && randomizer256) {
6689 struct mgmt_rp_read_local_oob_ext_data rp;
6690
6691 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6692 memcpy(rp.randomizer192, randomizer192,
6693 sizeof(rp.randomizer192));
6694
6695 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6696 memcpy(rp.randomizer256, randomizer256,
6697 sizeof(rp.randomizer256));
6698
6699 cmd_complete(cmd->sk, hdev->id,
6700 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6701 &rp, sizeof(rp));
6702 } else {
6703 struct mgmt_rp_read_local_oob_data rp;
6704
6705 memcpy(rp.hash, hash192, sizeof(rp.hash));
6706 memcpy(rp.randomizer, randomizer192,
6707 sizeof(rp.randomizer));
6708
6709 cmd_complete(cmd->sk, hdev->id,
6710 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6711 &rp, sizeof(rp));
6712 }
6713 }
6714
6715 mgmt_pending_remove(cmd);
6716 }
6717
/* Emit a Device Found event for a device seen during discovery or
 * passive scanning, bundling EIR data, an optional class of device
 * and any scan response data into the variable-length tail.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6765
6766 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6767 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6768 {
6769 struct mgmt_ev_device_found *ev;
6770 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6771 u16 eir_len;
6772
6773 ev = (struct mgmt_ev_device_found *) buf;
6774
6775 memset(buf, 0, sizeof(buf));
6776
6777 bacpy(&ev->addr.bdaddr, bdaddr);
6778 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6779 ev->rssi = rssi;
6780
6781 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6782 name_len);
6783
6784 ev->eir_len = cpu_to_le16(eir_len);
6785
6786 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6787 }
6788
6789 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6790 {
6791 struct mgmt_ev_discovering ev;
6792 struct pending_cmd *cmd;
6793
6794 BT_DBG("%s discovering %u", hdev->name, discovering);
6795
6796 if (discovering)
6797 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6798 else
6799 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6800
6801 if (cmd != NULL) {
6802 u8 type = hdev->discovery.type;
6803
6804 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6805 sizeof(type));
6806 mgmt_pending_remove(cmd);
6807 }
6808
6809 memset(&ev, 0, sizeof(ev));
6810 ev.type = hdev->discovery.type;
6811 ev.discovering = discovering;
6812
6813 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6814 }
6815
/* Completion callback for re-enabling advertising */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);

	/* Clear the advertising mgmt setting if we failed to re-enable it */
	if (status) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}
6826
6827 void mgmt_reenable_advertising(struct hci_dev *hdev)
6828 {
6829 struct hci_request req;
6830
6831 if (hci_conn_num(hdev, LE_LINK) > 0)
6832 return;
6833
6834 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6835 return;
6836
6837 hci_req_init(&req, hdev);
6838 enable_advertising(&req);
6839
6840 /* If this fails we have no option but to let user space know
6841 * that we've disabled advertising.
6842 */
6843 if (hci_req_run(&req, adv_enable_complete) < 0) {
6844 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6845 new_settings(hdev, NULL);
6846 }
6847 }