]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Enable passive scanning whenever we're connectable
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management commands handled by this implementation; the list is
 * reported to user space via MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Management events this implementation may emit; the list is reported
 * to user space via MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
131
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
134
/* A management command waiting for its HCI transaction to complete.
 * Linked into hdev->mgmt_pending (see mgmt_pending_add()) and released
 * with mgmt_pending_remove()/mgmt_pending_free().
 */
struct pending_cmd {
	struct list_head list;	/* entry in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* that created this entry */
	int index;		/* controller index (hdev->id) */
	void *param;		/* kmalloc'd copy of the command parameters */
	struct sock *sk;	/* requesting socket; a reference is held */
	void *user_data;	/* opcode-specific context pointer */
};
143
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table[] = {
146 MGMT_STATUS_SUCCESS,
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
207 };
208
209 static u8 mgmt_status(u8 hci_status)
210 {
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
213
214 return MGMT_STATUS_FAILED;
215 }
216
/* Broadcast a management event to all control sockets except @skip_sk.
 * @hdev may be NULL for events not tied to a controller; the index is
 * then set to MGMT_INDEX_NONE.  @data may be NULL when @data_len is 0.
 * Returns 0 on success or -ENOMEM.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	/* hci_send_to_control() clones per receiver; drop our reference */
	kfree_skb(skb);

	return 0;
}
246
/* Queue an MGMT_EV_CMD_STATUS event for command @cmd with @status on
 * the requesting socket @sk.  Returns 0 on success or a negative errno.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued; release our reference */

	return err;
}
276
/* Queue an MGMT_EV_CMD_COMPLETE event for command @cmd on socket @sk,
 * with @rp_len bytes of return parameters from @rp appended (@rp may be
 * NULL when @rp_len is 0).  Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);	/* not queued; release our reference */

	return err;
}
310
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_version rp;
315
316 BT_DBG("sock %p", sk);
317
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
320
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
323 }
324
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
326 u16 data_len)
327 {
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
331 __le16 *opcode;
332 size_t rp_size;
333 int i, err;
334
335 BT_DBG("sock %p", sk);
336
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
338
339 rp = kmalloc(rp_size, GFP_KERNEL);
340 if (!rp)
341 return -ENOMEM;
342
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
345
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
348
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
351
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
353 rp_size);
354 kfree(rp);
355
356 return err;
357 }
358
/* MGMT_OP_READ_INDEX_LIST: return the ids of all configured BR/EDR
 * controllers.  A first pass only sizes the allocation; the second pass
 * applies additional filters (SETUP/CONFIG/USER_CHANNEL state and the
 * raw-only quirk), so the first count is an upper bound and the reply
 * length is recomputed from the final count.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: must not sleep while hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply to the number of entries actually written */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418
/* MGMT_OP_READ_UNCONF_INDEX_LIST: return the ids of all unconfigured
 * BR/EDR controllers.  Mirrors read_index_list() but selects devices
 * with HCI_UNCONFIGURED set instead of cleared; the same two-pass
 * sizing scheme applies (first count is an upper bound).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: must not sleep while hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply to the number of entries actually written */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490 }
491
492 static __le32 get_missing_options(struct hci_dev *hdev)
493 {
494 u32 options = 0;
495
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
499
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
503
504 return cpu_to_le32(options);
505 }
506
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513 }
514
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521 }
522
/* MGMT_OP_READ_CONFIG_INFO: report the manufacturer id plus the
 * supported and still-missing configuration options for @hdev.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk says so */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting the public address needs a driver set_bdaddr hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
550
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, derived from its LMP capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;

	if (lmp_bredr_capable(hdev)) {
		settings |= MGMT_SETTING_CONNECTABLE;
		/* Fast connectable requires HCI version 1.2 or later */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_DISCOVERABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* Secure Connections: controller support or debugfs override */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	/* Configuration possible via quirk or driver set_bdaddr hook */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
589
/* Build the MGMT_SETTING_* bitmask of settings currently active on
 * @hdev, mapped one-to-one from its dev_flags bits and powered state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
638
639 #define PNP_INFO_SVCLASS_ID 0x1200
640
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data and return a pointer just past the written bytes.  @len is the
 * space remaining; when not all UUIDs fit, the field type is downgraded
 * from EIR_UUID16_ALL to EIR_UUID16_SOME and writing stops.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at bytes 12-13 of the 128-bit value */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip values below the service-class range (0x1100) */
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte; grows below */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
682
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data and return a pointer just past the written bytes.  Same scheme
 * as create_uuid16_list(); downgrades to EIR_UUID32_SOME on overflow.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte; grows below */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit portion starts at byte 12 of the 128-bit value */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
715
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data and return a pointer just past the written bytes.  Same scheme
 * as create_uuid16_list(); downgrades to EIR_UUID128_SOME on overflow.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length byte; grows below */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
748
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750 {
751 struct pending_cmd *cmd;
752
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
756 }
757
758 return NULL;
759 }
760
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764 {
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775 }
776
/* Write the LE scan response payload (currently just the local name EIR
 * field) into @ptr and return the number of bytes used.  The name is
 * truncated to the remaining space and tagged EIR_NAME_SHORT when it
 * does not fit completely.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* 2 bytes reserved for the field's length and type octets */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length octet covers the type octet plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		/* ptr left past the field for any future additions */
		ptr += (name_len + 2);
	}

	return ad_len;
}
802
/* Queue an HCI_OP_LE_SET_SCAN_RSP_DATA command on @req if the scan
 * response payload has changed; no-op when LE is disabled or the data
 * is already current.  Also caches the new data in hdev.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
827
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) that matches the controller's discoverable state.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited discoverable */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
851
/* Write the LE advertising payload (flags field plus optional TX power
 * field) into @ptr and return the number of bytes used.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* Advertise LE-only operation when BR/EDR is disabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit a flags field when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
883
/* Queue an HCI_OP_LE_SET_ADV_DATA command on @req if the advertising
 * payload has changed; no-op when LE is disabled or the data is already
 * current.  Also caches the new data in hdev.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
908
/* Build the BR/EDR extended inquiry response payload into @data:
 * local name, TX power, device id and the three UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			/* Truncate and mark as shortened name */
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field only when a source has been configured */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
956
/* Queue an HCI_OP_WRITE_EIR command on @req if the EIR payload has
 * changed.  No-op unless the controller is powered, supports extended
 * inquiry, has SSP enabled and the service cache is not active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* While the service cache is active, updates are deferred
	 * (see service_cache_off())
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
985
986 static u8 get_service_classes(struct hci_dev *hdev)
987 {
988 struct bt_uuid *uuid;
989 u8 val = 0;
990
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
993
994 return val;
995 }
996
/* Queue an HCI_OP_WRITE_CLASS_OF_DEV command on @req if the class of
 * device has changed.  No-op unless powered with BR/EDR enabled and no
 * active service cache.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Deferred while the service cache is active */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable sets bit 5 of the major class octet */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI command if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1025
1026 static bool get_connectable(struct hci_dev *hdev)
1027 {
1028 struct pending_cmd *cmd;
1029
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1032 */
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1034 if (cmd) {
1035 struct mgmt_mode *cp = cmd->param;
1036 return cp->val;
1037 }
1038
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1040 }
1041
/* Queue the HCI commands that (re)start LE advertising on @req:
 * update the random address if needed, set the advertising parameters
 * and finally enable advertising.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	/* 0x0800: fixed min/max advertising interval used here */
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	/* Connectable undirected vs. non-connectable advertising */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1076
1077 static void disable_advertising(struct hci_request *req)
1078 {
1079 u8 enable = 0x00;
1080
1081 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1082 }
1083
/* Delayed work handler: clear the service-cache flag and flush the
 * deferred EIR and class-of-device updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache was actually active */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1104
/* Delayed work handler: the resolvable private address timeout fired.
 * Mark the RPA expired and, if advertising with no LE connections up,
 * restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to restart when not advertising; don't touch the
	 * address while LE connections exist
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
1130
/* One-time per-controller mgmt initialization, triggered by the first
 * mgmt command that touches @hdev.  Idempotent via the HCI_MGMT bit.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
1146
/* MGMT_OP_READ_INFO: report address, version, manufacturer, settings,
 * device class and names for @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1176
1177 static void mgmt_pending_free(struct pending_cmd *cmd)
1178 {
1179 sock_put(cmd->sk);
1180 kfree(cmd->param);
1181 kfree(cmd);
1182 }
1183
1184 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1185 struct hci_dev *hdev, void *data,
1186 u16 len)
1187 {
1188 struct pending_cmd *cmd;
1189
1190 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1191 if (!cmd)
1192 return NULL;
1193
1194 cmd->opcode = opcode;
1195 cmd->index = hdev->id;
1196
1197 cmd->param = kmalloc(len, GFP_KERNEL);
1198 if (!cmd->param) {
1199 kfree(cmd);
1200 return NULL;
1201 }
1202
1203 if (data)
1204 memcpy(cmd->param, data, len);
1205
1206 cmd->sk = sk;
1207 sock_hold(sk);
1208
1209 list_add(&cmd->list, &hdev->mgmt_pending);
1210
1211 return cmd;
1212 }
1213
1214 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1215 void (*cb)(struct pending_cmd *cmd,
1216 void *data),
1217 void *data)
1218 {
1219 struct pending_cmd *cmd, *tmp;
1220
1221 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1222 if (opcode > 0 && cmd->opcode != opcode)
1223 continue;
1224
1225 cb(cmd, data);
1226 }
1227 }
1228
/* Unlink a pending command from the device list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1234
1235 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1236 {
1237 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1238
1239 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1240 sizeof(settings));
1241 }
1242
1243 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1244 {
1245 BT_DBG("%s status 0x%02x", hdev->name, status);
1246
1247 if (hci_conn_count(hdev) == 0) {
1248 cancel_delayed_work(&hdev->power_off);
1249 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1250 }
1251 }
1252
/* Append to @req the HCI commands needed to abort whatever discovery
 * activity is currently running: classic inquiry, LE active scanning,
 * remote name resolving, or passive LE scanning.
 */
static void hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* Classic inquiry in progress */
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan phase: stop the scan and cancel the
			 * delayed work that would have disabled it.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		break;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(req);
		break;
	}
}
1289
/* Build and run a single HCI request that quiesces the controller
 * before power off: disable page/inquiry scanning and advertising,
 * abort discovery, and disconnect, cancel or reject every connection
 * depending on its state.
 *
 * Returns the hci_req_run() result; -ENODATA means there was nothing
 * to do.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		/* Turn off both inquiry and page scanning */
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect cleanly */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection still being established */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection awaiting acceptance */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1342
/* Handle the Set Powered mgmt command. Power-on is deferred to the
 * power_on work item; power-off first cleans up HCI state and then
 * schedules the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller is in the auto-off grace period, powering
	 * on only requires mgmt to take over ownership of the already
	 * powered controller.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches: just reply with settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			/* Nothing to clean up: power off right away */
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1408
1409 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1410 {
1411 __le32 ev;
1412
1413 ev = cpu_to_le32(get_current_settings(hdev));
1414
1415 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1416 }
1417
/* Context passed through mgmt_pending_foreach() callbacks such as
 * settings_rsp(); @sk collects the first responder's socket so the
 * caller can skip it when broadcasting events afterwards.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1423
1424 static void settings_rsp(struct pending_cmd *cmd, void *data)
1425 {
1426 struct cmd_lookup *match = data;
1427
1428 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1429
1430 list_del(&cmd->list);
1431
1432 if (match->sk == NULL) {
1433 match->sk = cmd->sk;
1434 sock_hold(match->sk);
1435 }
1436
1437 mgmt_pending_free(cmd);
1438 }
1439
1440 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1441 {
1442 u8 *status = data;
1443
1444 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1445 mgmt_pending_remove(cmd);
1446 }
1447
1448 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1449 {
1450 if (!lmp_bredr_capable(hdev))
1451 return MGMT_STATUS_NOT_SUPPORTED;
1452 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1453 return MGMT_STATUS_REJECTED;
1454 else
1455 return MGMT_STATUS_SUCCESS;
1456 }
1457
1458 static u8 mgmt_le_support(struct hci_dev *hdev)
1459 {
1460 if (!lmp_le_capable(hdev))
1461 return MGMT_STATUS_NOT_SUPPORTED;
1462 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1463 return MGMT_STATUS_REJECTED;
1464 else
1465 return MGMT_STATUS_SUCCESS;
1466 }
1467
/* HCI request completion for Set Discoverable: commit or roll back the
 * flag changes, arm the discoverable timeout, reply to the pending
 * command and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the limited flag set speculatively in
		 * set_discoverable().
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout now that the mode is active */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1524
/* Handle the Set Discoverable mgmt command. Value 0x00 disables,
 * 0x01 enables general discoverable, 0x02 enables limited
 * discoverable (which requires a timeout). The actual flag commit
 * happens in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1689
/* Queue page scan activity/type commands that switch between fast
 * (interlaced, short interval) and standard page scanning. Commands
 * are only queued for parameters that actually differ from the
 * controller's current values.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require Bluetooth 1.2+ */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1724
/* HCI request completion for Set Connectable: commit the flag change,
 * reply to the pending command and, on a real change, broadcast new
 * settings and re-evaluate background scanning.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed) {
		new_settings(hdev, cmd->sk);
		/* Connectable state influences passive scanning */
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1764
1765 static int set_connectable_update_settings(struct hci_dev *hdev,
1766 struct sock *sk, u8 val)
1767 {
1768 bool changed = false;
1769 int err;
1770
1771 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1772 changed = true;
1773
1774 if (val) {
1775 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1776 } else {
1777 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1778 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1779 }
1780
1781 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1782 if (err < 0)
1783 return err;
1784
1785 if (changed)
1786 return new_settings(hdev, sk);
1787
1788 return 0;
1789 }
1790
/* Handle the Set Connectable mgmt command. On BR/EDR this toggles page
 * scanning; on LE-only controllers it only updates advertising data.
 * The flag commit happens in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update the stored flags */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		/* BR/EDR: page scan state needs to change */
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* A running discoverable timeout is pointless
			 * once page scanning is turned off.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable state,
	 * but only when no LE connections exist.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* No HCI commands were needed: fall back to a pure
		 * settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1885
1886 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1887 u16 len)
1888 {
1889 struct mgmt_mode *cp = data;
1890 bool changed;
1891 int err;
1892
1893 BT_DBG("request for %s", hdev->name);
1894
1895 if (cp->val != 0x00 && cp->val != 0x01)
1896 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1897 MGMT_STATUS_INVALID_PARAMS);
1898
1899 hci_dev_lock(hdev);
1900
1901 if (cp->val)
1902 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1903 else
1904 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1905
1906 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1907 if (err < 0)
1908 goto unlock;
1909
1910 if (changed)
1911 err = new_settings(hdev, sk);
1912
1913 unlock:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
/* Handle the Set Link Security mgmt command: toggle BR/EDR
 * authentication via HCI Write Authentication Enable, or just update
 * the stored flag when powered off.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flag can be updated */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no-op */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1987
/* Handle the Set Secure Simple Pairing mgmt command. Disabling SSP
 * also disables High Speed (HS depends on SSP).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only update the stored flags */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Turning SSP off also turns HS off; "changed"
			 * must be true if either flag was cleared.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested mode: just return settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off debug mode if it was active;
	 * cp->val is 0x00 here, matching the debug mode's "off" value.
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2069
2070 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2071 {
2072 struct mgmt_mode *cp = data;
2073 bool changed;
2074 u8 status;
2075 int err;
2076
2077 BT_DBG("request for %s", hdev->name);
2078
2079 status = mgmt_bredr_support(hdev);
2080 if (status)
2081 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2082
2083 if (!lmp_ssp_capable(hdev))
2084 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2085 MGMT_STATUS_NOT_SUPPORTED);
2086
2087 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2088 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2089 MGMT_STATUS_REJECTED);
2090
2091 if (cp->val != 0x00 && cp->val != 0x01)
2092 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2093 MGMT_STATUS_INVALID_PARAMS);
2094
2095 hci_dev_lock(hdev);
2096
2097 if (cp->val) {
2098 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2099 } else {
2100 if (hdev_is_powered(hdev)) {
2101 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2102 MGMT_STATUS_REJECTED);
2103 goto unlock;
2104 }
2105
2106 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2107 }
2108
2109 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2110 if (err < 0)
2111 goto unlock;
2112
2113 if (changed)
2114 err = new_settings(hdev, sk);
2115
2116 unlock:
2117 hci_dev_unlock(hdev);
2118 return err;
2119 }
2120
/* HCI request completion for Set LE: answer all pending SET_LE
 * commands, broadcast new settings, and refresh the advertising and
 * scan response data when LE ended up enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
2158
/* Handle the Set Low Energy mgmt command via HCI Write LE Host
 * Supported; LE-only controllers cannot toggle LE at all.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed when powered off or the host LE state
	 * already matches; just update flags.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implicitly disables advertising */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must be stopped before LE is disabled */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2247
2248 /* This is a helper function to test for pending mgmt commands that can
2249 * cause CoD or EIR HCI commands. We can only allow one such pending
2250 * mgmt command at a time since otherwise we cannot easily track what
2251 * the current values are, will be, and based on that calculate if a new
2252 * HCI command needs to be sent and if yes with what value.
2253 */
2254 static bool pending_eir_or_class(struct hci_dev *hdev)
2255 {
2256 struct pending_cmd *cmd;
2257
2258 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2259 switch (cmd->opcode) {
2260 case MGMT_OP_ADD_UUID:
2261 case MGMT_OP_REMOVE_UUID:
2262 case MGMT_OP_SET_DEV_CLASS:
2263 case MGMT_OP_SET_POWERED:
2264 return true;
2265 }
2266 }
2267
2268 return false;
2269 }
2270
/* Bluetooth Base UUID in little-endian byte order; bytes 12-15 carry
 * the 16/32-bit short-form value (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2275
2276 static u8 get_uuid_size(const u8 *uuid)
2277 {
2278 u32 val;
2279
2280 if (memcmp(uuid, bluetooth_base_uuid, 12))
2281 return 128;
2282
2283 val = get_unaligned_le32(&uuid[12]);
2284 if (val > 0xffff)
2285 return 32;
2286
2287 return 16;
2288 }
2289
2290 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2291 {
2292 struct pending_cmd *cmd;
2293
2294 hci_dev_lock(hdev);
2295
2296 cmd = mgmt_pending_find(mgmt_op, hdev);
2297 if (!cmd)
2298 goto unlock;
2299
2300 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2301 hdev->dev_class, 3);
2302
2303 mgmt_pending_remove(cmd);
2304
2305 unlock:
2306 hci_dev_unlock(hdev);
2307 }
2308
/* HCI request completion for Add UUID: finish the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2315
/* Handle the Add UUID mgmt command: store the UUID and refresh the
 * class of device and EIR data. When no HCI commands are needed
 * (-ENODATA), complete immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send: reply right away with the class */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2373
2374 static bool enable_service_cache(struct hci_dev *hdev)
2375 {
2376 if (!hdev_is_powered(hdev))
2377 return false;
2378
2379 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2380 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2381 CACHE_TIMEOUT);
2382 return true;
2383 }
2384
2385 return false;
2386 }
2387
/* Completion callback for the HCI request issued by remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2394
/* Remove UUID mgmt command handler. An all-zero UUID means "remove all
 * UUIDs"; in that case the service cache may be re-armed instead of
 * rewriting class/EIR immediately.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got re-armed, the class/EIR update
		 * is deferred to the cache timeout, so reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant since matching entries are deleted while walking */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing needed updating, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2472
/* Completion callback for the HCI request issued by set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2479
/* Set Device Class mgmt command handler (BR/EDR only). The two low
 * bits of minor and the three high bits of major are reserved by the
 * Class of Device format and must be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the stored values are updated; the
	 * controller is programmed on the next power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* NOTE(review): the lock is dropped around the sync cancel,
		 * presumably because the service cache work itself takes
		 * hdev's lock and a sync cancel under it could deadlock --
		 * confirm against the service cache work function.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing needed updating, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2550
/* Load Link Keys mgmt command handler: replaces all stored BR/EDR link
 * keys with the supplied list and updates the "keep debug keys"
 * setting. Responds synchronously.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the expected_len computation below within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command payload must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	/* Track whether the debug-keys setting actually changed so a
	 * settings event is only emitted when needed.
	 */
	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2632
2633 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634 u8 addr_type, struct sock *skip_sk)
2635 {
2636 struct mgmt_ev_device_unpaired ev;
2637
2638 bacpy(&ev.addr.bdaddr, bdaddr);
2639 ev.addr.type = addr_type;
2640
2641 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2642 skip_sk);
2643 }
2644
2645 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2646 u16 len)
2647 {
2648 struct mgmt_cp_unpair_device *cp = data;
2649 struct mgmt_rp_unpair_device rp;
2650 struct hci_cp_disconnect dc;
2651 struct pending_cmd *cmd;
2652 struct hci_conn *conn;
2653 int err;
2654
2655 memset(&rp, 0, sizeof(rp));
2656 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2657 rp.addr.type = cp->addr.type;
2658
2659 if (!bdaddr_type_is_valid(cp->addr.type))
2660 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2661 MGMT_STATUS_INVALID_PARAMS,
2662 &rp, sizeof(rp));
2663
2664 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2665 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2666 MGMT_STATUS_INVALID_PARAMS,
2667 &rp, sizeof(rp));
2668
2669 hci_dev_lock(hdev);
2670
2671 if (!hdev_is_powered(hdev)) {
2672 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2673 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2674 goto unlock;
2675 }
2676
2677 if (cp->addr.type == BDADDR_BREDR) {
2678 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2679 } else {
2680 u8 addr_type;
2681
2682 if (cp->addr.type == BDADDR_LE_PUBLIC)
2683 addr_type = ADDR_LE_DEV_PUBLIC;
2684 else
2685 addr_type = ADDR_LE_DEV_RANDOM;
2686
2687 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2688
2689 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2690
2691 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2692 }
2693
2694 if (err < 0) {
2695 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2696 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2697 goto unlock;
2698 }
2699
2700 if (cp->disconnect) {
2701 if (cp->addr.type == BDADDR_BREDR)
2702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2703 &cp->addr.bdaddr);
2704 else
2705 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2706 &cp->addr.bdaddr);
2707 } else {
2708 conn = NULL;
2709 }
2710
2711 if (!conn) {
2712 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2713 &rp, sizeof(rp));
2714 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2715 goto unlock;
2716 }
2717
2718 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2719 sizeof(*cp));
2720 if (!cmd) {
2721 err = -ENOMEM;
2722 goto unlock;
2723 }
2724
2725 dc.handle = cpu_to_le16(conn->handle);
2726 dc.reason = 0x13; /* Remote User Terminated Connection */
2727 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2728 if (err < 0)
2729 mgmt_pending_remove(cmd);
2730
2731 unlock:
2732 hci_dev_unlock(hdev);
2733 return err;
2734 }
2735
/* Disconnect mgmt command handler: terminates an existing ACL or LE
 * connection. The response is deferred until the disconnect completes
 * (tracked via the pending command).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections are not actually established */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2800
2801 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2802 {
2803 switch (link_type) {
2804 case LE_LINK:
2805 switch (addr_type) {
2806 case ADDR_LE_DEV_PUBLIC:
2807 return BDADDR_LE_PUBLIC;
2808
2809 default:
2810 /* Fallback to LE Random address type */
2811 return BDADDR_LE_RANDOM;
2812 }
2813
2814 default:
2815 /* Fallback to BR/EDR type */
2816 return BDADDR_BREDR;
2817 }
2818 }
2819
2820 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2821 u16 data_len)
2822 {
2823 struct mgmt_rp_get_connections *rp;
2824 struct hci_conn *c;
2825 size_t rp_len;
2826 int err;
2827 u16 i;
2828
2829 BT_DBG("");
2830
2831 hci_dev_lock(hdev);
2832
2833 if (!hdev_is_powered(hdev)) {
2834 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2835 MGMT_STATUS_NOT_POWERED);
2836 goto unlock;
2837 }
2838
2839 i = 0;
2840 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2841 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2842 i++;
2843 }
2844
2845 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2846 rp = kmalloc(rp_len, GFP_KERNEL);
2847 if (!rp) {
2848 err = -ENOMEM;
2849 goto unlock;
2850 }
2851
2852 i = 0;
2853 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2854 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2855 continue;
2856 bacpy(&rp->addr[i].bdaddr, &c->dst);
2857 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2858 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2859 continue;
2860 i++;
2861 }
2862
2863 rp->conn_count = cpu_to_le16(i);
2864
2865 /* Recalculate length in case of filtered SCO connections, etc */
2866 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2867
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2869 rp_len);
2870
2871 kfree(rp);
2872
2873 unlock:
2874 hci_dev_unlock(hdev);
2875 return err;
2876 }
2877
2878 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2879 struct mgmt_cp_pin_code_neg_reply *cp)
2880 {
2881 struct pending_cmd *cmd;
2882 int err;
2883
2884 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2885 sizeof(*cp));
2886 if (!cmd)
2887 return -ENOMEM;
2888
2889 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2890 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2891 if (err < 0)
2892 mgmt_pending_remove(cmd);
2893
2894 return err;
2895 }
2896
/* PIN Code Reply mgmt command handler. If high security is required
 * the PIN must be a full 16 bytes; otherwise a negative reply is sent
 * to the controller and the command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes only exist for BR/EDR (ACL) connections */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject towards the controller, then fail the command
		 * (unless sending the negative reply itself failed).
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2956
2957 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2958 u16 len)
2959 {
2960 struct mgmt_cp_set_io_capability *cp = data;
2961
2962 BT_DBG("");
2963
2964 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2965 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2966 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2967
2968 hci_dev_lock(hdev);
2969
2970 hdev->io_capability = cp->io_capability;
2971
2972 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2973 hdev->io_capability);
2974
2975 hci_dev_unlock(hdev);
2976
2977 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2978 0);
2979 }
2980
2981 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2982 {
2983 struct hci_dev *hdev = conn->hdev;
2984 struct pending_cmd *cmd;
2985
2986 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2987 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2988 continue;
2989
2990 if (cmd->user_data != conn)
2991 continue;
2992
2993 return cmd;
2994 }
2995
2996 return NULL;
2997 }
2998
/* Finish a pending Pair Device command: send the response, detach the
 * connection callbacks and drop the connection reference taken when
 * the pairing was initiated.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3019
3020 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3021 {
3022 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3023 struct pending_cmd *cmd;
3024
3025 cmd = find_pairing(conn);
3026 if (cmd)
3027 pairing_complete(cmd, status);
3028 }
3029
3030 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3031 {
3032 struct pending_cmd *cmd;
3033
3034 BT_DBG("status %u", status);
3035
3036 cmd = find_pairing(conn);
3037 if (!cmd)
3038 BT_DBG("Unable to find a pending command");
3039 else
3040 pairing_complete(cmd, mgmt_status(status));
3041 }
3042
3043 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3044 {
3045 struct pending_cmd *cmd;
3046
3047 BT_DBG("status %u", status);
3048
3049 if (!status)
3050 return;
3051
3052 cmd = find_pairing(conn);
3053 if (!cmd)
3054 BT_DBG("Unable to find a pending command");
3055 else
3056 pairing_complete(cmd, mgmt_status(status));
3057 }
3058
/* Pair Device mgmt command handler: initiates an ACL or LE connection
 * and arms per-connection callbacks that complete the command once
 * pairing finishes (or fails). The response is always deferred via the
 * pending command unless an early error occurs.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type,
				      HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* An existing connect_cfm_cb means some other operation (e.g.
	 * another pairing) already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3177
/* Cancel Pair Device mgmt command handler: aborts an in-progress Pair
 * Device command (which receives a CANCELLED status) after verifying
 * the address matches the pending pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3219
/* Shared handler for all user confirmation / passkey (negative)
 * replies. LE replies are routed to SMP and answered synchronously;
 * BR/EDR replies are sent as the given HCI command with the response
 * deferred via a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP, not the HCI command */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3287
/* PIN Code Neg Reply mgmt command handler (thin wrapper) */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3299
/* User Confirm Reply mgmt command handler. The explicit length check
 * guards the fixed-size command structure.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3315
/* User Confirm Neg Reply mgmt command handler (thin wrapper) */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3327
/* User Passkey Reply mgmt command handler (thin wrapper) */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3339
/* User Passkey Neg Reply mgmt command handler (thin wrapper) */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3351
3352 static void update_name(struct hci_request *req)
3353 {
3354 struct hci_dev *hdev = req->hdev;
3355 struct hci_cp_write_local_name cp;
3356
3357 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3358
3359 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3360 }
3361
3362 static void set_name_complete(struct hci_dev *hdev, u8 status)
3363 {
3364 struct mgmt_cp_set_local_name *cp;
3365 struct pending_cmd *cmd;
3366
3367 BT_DBG("status 0x%02x", status);
3368
3369 hci_dev_lock(hdev);
3370
3371 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3372 if (!cmd)
3373 goto unlock;
3374
3375 cp = cmd->param;
3376
3377 if (status)
3378 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3379 mgmt_status(status));
3380 else
3381 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3382 cp, sizeof(*cp));
3383
3384 mgmt_pending_remove(cmd);
3385
3386 unlock:
3387 hci_dev_unlock(hdev);
3388 }
3389
/* Set Local Name mgmt command handler: updates the stored long and
 * short names and, while powered, pushes the new name to the
 * controller (local name, EIR, LE scan response).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only store the names and report the change */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3458
/* Read Local OOB Data mgmt command handler: asks the controller for
 * its out-of-band pairing data. With Secure Connections enabled the
 * extended variant (192- and 256-bit values) is requested. Response is
 * deferred until the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding read at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3506
/* Add Remote OOB Data mgmt command handler. The command length selects
 * between the legacy variant (192-bit hash/randomizer only) and the
 * extended variant that also carries the 256-bit Secure Connections
 * values. Responds synchronously.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3554
3555 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3556 void *data, u16 len)
3557 {
3558 struct mgmt_cp_remove_remote_oob_data *cp = data;
3559 u8 status;
3560 int err;
3561
3562 BT_DBG("%s", hdev->name);
3563
3564 hci_dev_lock(hdev);
3565
3566 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3567 if (err < 0)
3568 status = MGMT_STATUS_INVALID_PARAMS;
3569 else
3570 status = MGMT_STATUS_SUCCESS;
3571
3572 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3573 status, &cp->addr, sizeof(cp->addr));
3574
3575 hci_dev_unlock(hdev);
3576 return err;
3577 }
3578
/* Fail a pending Start Discovery command: resets discovery state and
 * completes the command with the translated HCI status and the
 * requested discovery type.
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3599
3600 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3601 {
3602 unsigned long timeout = 0;
3603
3604 BT_DBG("status %d", status);
3605
3606 if (status) {
3607 hci_dev_lock(hdev);
3608 mgmt_start_discovery_failed(hdev, status);
3609 hci_dev_unlock(hdev);
3610 return;
3611 }
3612
3613 hci_dev_lock(hdev);
3614 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3615 hci_dev_unlock(hdev);
3616
3617 switch (hdev->discovery.type) {
3618 case DISCOV_TYPE_LE:
3619 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3620 break;
3621
3622 case DISCOV_TYPE_INTERLEAVED:
3623 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3624 break;
3625
3626 case DISCOV_TYPE_BREDR:
3627 break;
3628
3629 default:
3630 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3631 }
3632
3633 if (!timeout)
3634 return;
3635
3636 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3637 }
3638
/* Start Discovery command handler: kick off BR/EDR inquiry, LE scanning
 * or interleaved discovery depending on cp->type. Builds a single HCI
 * request and transitions the discovery state machine to STARTING on
 * success; start_discovery_complete() finishes the transition.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session may run at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* An inquiry already driven by other means blocks us. */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR enabled. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Active advertising prevents (re)configuring LE scanning. */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3787
3788 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3789 {
3790 struct pending_cmd *cmd;
3791 int err;
3792
3793 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3794 if (!cmd)
3795 return -ENOENT;
3796
3797 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3798 &hdev->discovery.type, sizeof(hdev->discovery.type));
3799 mgmt_pending_remove(cmd);
3800
3801 return err;
3802 }
3803
3804 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3805 {
3806 BT_DBG("status %d", status);
3807
3808 hci_dev_lock(hdev);
3809
3810 if (status) {
3811 mgmt_stop_discovery_failed(hdev, status);
3812 goto unlock;
3813 }
3814
3815 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3816
3817 unlock:
3818 hci_dev_unlock(hdev);
3819 }
3820
/* Stop Discovery command handler: abort an ongoing discovery session.
 * The type supplied by userspace must match the type the session was
 * started with. The actual stopping is delegated to hci_stop_discovery()
 * which queues whatever HCI commands the current phase requires.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Reject a type that does not match the active session. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		/* Commands were queued; the callback finishes the job. */
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3876
3877 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3878 u16 len)
3879 {
3880 struct mgmt_cp_confirm_name *cp = data;
3881 struct inquiry_entry *e;
3882 int err;
3883
3884 BT_DBG("%s", hdev->name);
3885
3886 hci_dev_lock(hdev);
3887
3888 if (!hci_discovery_active(hdev)) {
3889 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3890 MGMT_STATUS_FAILED, &cp->addr,
3891 sizeof(cp->addr));
3892 goto failed;
3893 }
3894
3895 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3896 if (!e) {
3897 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3898 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3899 sizeof(cp->addr));
3900 goto failed;
3901 }
3902
3903 if (cp->name_known) {
3904 e->name_state = NAME_KNOWN;
3905 list_del(&e->list);
3906 } else {
3907 e->name_state = NAME_NEEDED;
3908 hci_inquiry_cache_update_resolve(hdev, e);
3909 }
3910
3911 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3912 sizeof(cp->addr));
3913
3914 failed:
3915 hci_dev_unlock(hdev);
3916 return err;
3917 }
3918
3919 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3920 u16 len)
3921 {
3922 struct mgmt_cp_block_device *cp = data;
3923 u8 status;
3924 int err;
3925
3926 BT_DBG("%s", hdev->name);
3927
3928 if (!bdaddr_type_is_valid(cp->addr.type))
3929 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3930 MGMT_STATUS_INVALID_PARAMS,
3931 &cp->addr, sizeof(cp->addr));
3932
3933 hci_dev_lock(hdev);
3934
3935 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3936 if (err < 0) {
3937 status = MGMT_STATUS_FAILED;
3938 goto done;
3939 }
3940
3941 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3942 sk);
3943 status = MGMT_STATUS_SUCCESS;
3944
3945 done:
3946 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3947 &cp->addr, sizeof(cp->addr));
3948
3949 hci_dev_unlock(hdev);
3950
3951 return err;
3952 }
3953
3954 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3955 u16 len)
3956 {
3957 struct mgmt_cp_unblock_device *cp = data;
3958 u8 status;
3959 int err;
3960
3961 BT_DBG("%s", hdev->name);
3962
3963 if (!bdaddr_type_is_valid(cp->addr.type))
3964 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3965 MGMT_STATUS_INVALID_PARAMS,
3966 &cp->addr, sizeof(cp->addr));
3967
3968 hci_dev_lock(hdev);
3969
3970 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3971 if (err < 0) {
3972 status = MGMT_STATUS_INVALID_PARAMS;
3973 goto done;
3974 }
3975
3976 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3977 sk);
3978 status = MGMT_STATUS_SUCCESS;
3979
3980 done:
3981 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3982 &cp->addr, sizeof(cp->addr));
3983
3984 hci_dev_unlock(hdev);
3985
3986 return err;
3987 }
3988
3989 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3990 u16 len)
3991 {
3992 struct mgmt_cp_set_device_id *cp = data;
3993 struct hci_request req;
3994 int err;
3995 __u16 source;
3996
3997 BT_DBG("%s", hdev->name);
3998
3999 source = __le16_to_cpu(cp->source);
4000
4001 if (source > 0x0002)
4002 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4003 MGMT_STATUS_INVALID_PARAMS);
4004
4005 hci_dev_lock(hdev);
4006
4007 hdev->devid_source = source;
4008 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4009 hdev->devid_product = __le16_to_cpu(cp->product);
4010 hdev->devid_version = __le16_to_cpu(cp->version);
4011
4012 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4013
4014 hci_req_init(&req, hdev);
4015 update_eir(&req);
4016 hci_req_run(&req, NULL);
4017
4018 hci_dev_unlock(hdev);
4019
4020 return err;
4021 }
4022
4023 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4024 {
4025 struct cmd_lookup match = { NULL, hdev };
4026
4027 if (status) {
4028 u8 mgmt_err = mgmt_status(status);
4029
4030 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4031 cmd_status_rsp, &mgmt_err);
4032 return;
4033 }
4034
4035 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4036 &match);
4037
4038 new_settings(hdev, match.sk);
4039
4040 if (match.sk)
4041 sock_put(match.sk);
4042 }
4043
/* Set Advertising command handler: toggle LE advertising. When no HCI
 * traffic is needed (powered off, no change, or LE links active) only
 * the HCI_ADVERTISING flag is toggled and userspace is answered
 * directly; otherwise an advertising enable/disable request is issued
 * and set_advertising_complete() finishes the command.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* Only boolean values are valid for this mode. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* A pending Set Advertising or Set LE operation would conflict
	 * with the request about to be issued.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4121
/* Set Static Address command handler: store the LE static random
 * address to be used by the controller. Only allowed while powered
 * off; BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while the controller is down. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE is not an acceptable static address. */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4161
/* Set Scan Parameters command handler: store new LE scan interval and
 * window values (in 0.625 ms units, range 0x0004-0x4000, window must
 * not exceed interval) and restart any running background scan so the
 * new values take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may not be larger than the scan interval. */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4217
/* Completion callback for the Set Fast Connectable HCI request: commit
 * the HCI_FAST_CONNECTABLE flag on success and answer the pending
 * command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested value was stashed in the pending command. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4250
/* Set Fast Connectable command handler: adjust page scan parameters
 * for faster connection establishment. Requires a powered, connectable
 * BR/EDR controller of at least Bluetooth 1.2. The flag is committed
 * by fast_connectable_complete() once the controller has accepted the
 * new parameters.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are valid for this mode. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Fast connectable only makes sense when connectable at all. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: answer with the current settings. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4315
4316 static void set_bredr_scan(struct hci_request *req)
4317 {
4318 struct hci_dev *hdev = req->hdev;
4319 u8 scan = 0;
4320
4321 /* Ensure that fast connectable is disabled. This function will
4322 * not do anything if the page scan parameters are already what
4323 * they should be.
4324 */
4325 write_fast_connectable(req, false);
4326
4327 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4328 scan |= SCAN_PAGE;
4329 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4330 scan |= SCAN_INQUIRY;
4331
4332 if (scan)
4333 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4334 }
4335
/* Completion callback for the Set BR/EDR HCI request: on failure roll
 * back the optimistically-set HCI_BREDR_ENABLED flag, otherwise answer
 * with the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4367
/* Set BR/EDR command handler: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling is only allowed while powered off;
 * enabling while powered on triggers HCI traffic and is completed by
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	/* Only boolean values are valid for this mode. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: answer with the current settings. */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR invalidates all BR/EDR-only settings. */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4457
/* Set Secure Connections command handler: cp->val may be 0x00 (off),
 * 0x01 (on) or 0x02 (SC-only mode). While powered off only the flags
 * are toggled; while powered on a Write SC Support command is sent to
 * the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* SC requires controller support unless debugfs forced it on. */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either the enabled or SC-only state: reply now. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The SC-only flag has no controller counterpart; update it
	 * immediately once the command was queued successfully.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4545
/* Set Debug Keys command handler: cp->val may be 0x00 (discard debug
 * keys), 0x01 (keep them) or 0x02 (keep and also generate debug keys).
 * The 0x02 case toggles SSP debug mode on the controller when powered.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept. */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 additionally enables use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	/* Tell the controller about the new SSP debug mode; the result
	 * is intentionally not waited for.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4593
/* Set Privacy command handler: enable or disable LE privacy and store
 * the Identity Resolving Key supplied by userspace. Only allowed while
 * powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off. */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next power-up. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4643
4644 static bool irk_is_valid(struct mgmt_irk_info *irk)
4645 {
4646 switch (irk->addr.type) {
4647 case BDADDR_LE_PUBLIC:
4648 return true;
4649
4650 case BDADDR_LE_RANDOM:
4651 /* Two most significant bits shall be set */
4652 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4653 return false;
4654 return true;
4655 }
4656
4657 return false;
4658 }
4659
/* Load IRKs command handler: replace the entire list of stored Identity
 * Resolving Keys with the list supplied by userspace. All entries are
 * validated before any existing key is dropped.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared entry count. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything first so a bad entry rejects the whole
	 * request without touching the stored keys.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* Userspace handing us IRKs implies it can resolve RPAs. */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4726
4727 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4728 {
4729 if (key->master != 0x00 && key->master != 0x01)
4730 return false;
4731
4732 switch (key->addr.type) {
4733 case BDADDR_LE_PUBLIC:
4734 return true;
4735
4736 case BDADDR_LE_RANDOM:
4737 /* Two most significant bits shall be set */
4738 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4739 return false;
4740 return true;
4741 }
4742
4743 return false;
4744 }
4745
/* Load Long Term Keys command handler: replace the entire list of
 * stored LE Long Term Keys with the list supplied by userspace. All
 * entries are validated before any existing key is dropped; entries
 * with an unknown key type are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below U16_MAX. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared entry count. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything first so a bad entry rejects the whole
	 * request without touching the stored keys.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* The master flag selects the key's role variant. */
		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Unknown key types are skipped, not rejected. */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4829
/* Context passed to get_conn_info_complete() when iterating over
 * pending Get Connection Information commands.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the reply is for */
	bool valid_tx_power;	/* TX power values in conn are usable */
	u8 mgmt_status;		/* status to report to user space */
};
4835
/* Per-pending-command callback invoked from conn_info_refresh_complete().
 *
 * Replies to a pending Get Connection Information command if, and only
 * if, it refers to the connection in the lookup data, then releases
 * the connection reference taken in get_conn_info() and removes the
 * pending command.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only reply for the connection this refresh was about */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4871
/* Completion callback for the RSSI/TX-power refresh request issued by
 * get_conn_info(). Resolves the connection the request was for and
 * answers all pending Get Connection Information commands that match.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4929
/* Handle the Get Connection Information management command.
 *
 * Replies immediately from cached RSSI/TX-power values when they are
 * still fresh; otherwise issues an HCI request to refresh them and
 * defers the reply to conn_info_refresh_complete(). Returns a negative
 * errno on failure to queue the request or send the reply.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the address back in every reply, including error replies */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* Read RSSI is always first so the completion handler can
		 * rely on it when resolving the connection handle.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred reply;
		 * dropped in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5041
/* Completion callback for the HCI Read Clock request issued by
 * get_clock_info(). Locates the matching pending command via the
 * connection (or NULL for a local-clock-only request) and replies
 * with the cached clock values.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; look the connection up by its handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5094
/* Handle the Get Clock Information management command.
 *
 * Only supported for BR/EDR. Queues an HCI Read Clock request for the
 * local clock and, when an address was given, the piconet clock of the
 * matching connection; the reply is sent from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Echo the address back in every reply, including error replies */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is of interest */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First read: local clock (handle 0, which == 0x00) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Reference dropped in get_clock_info_complete() */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5167
5168 static void device_added(struct sock *sk, struct hci_dev *hdev,
5169 bdaddr_t *bdaddr, u8 type, u8 action)
5170 {
5171 struct mgmt_ev_device_added ev;
5172
5173 bacpy(&ev.addr.bdaddr, bdaddr);
5174 ev.addr.type = type;
5175 ev.action = action;
5176
5177 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5178 }
5179
/* Handle the Add Device management command.
 *
 * Only LE addresses are accepted here. action 0x01 means the kernel
 * should autonomously connect whenever the device is seen; action 0x00
 * means passively scan and report it instead.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_le(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* Notify other management sockets about the new entry */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5232
5233 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5234 bdaddr_t *bdaddr, u8 type)
5235 {
5236 struct mgmt_ev_device_removed ev;
5237
5238 bacpy(&ev.addr.bdaddr, bdaddr);
5239 ev.addr.type = type;
5240
5241 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5242 }
5243
/* Handle the Remove Device management command.
 *
 * Removes a single device's connection parameters when a concrete LE
 * address is given, or all non-disabled LE connection parameters when
 * BDADDR_ANY with type 0 is given. Updates the background scan in both
 * cases.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added through Add Device and
		 * therefore cannot be removed through Remove Device.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from both the action list (pend_le_conns or
		 * pend_le_reports) and the main parameter list.
		 */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5323
/* Handle the Load Connection Parameters management command.
 *
 * Clears all disabled stored parameter entries and loads the supplied
 * list, skipping (rather than rejecting) individual invalid entries.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Cap param_count so the expected_len computation below cannot
	 * wrap around the u16 arithmetic.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match param_count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5408
/* Handle the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk and, when the change flips the
 * device between the configured and unconfigured index lists, moves it
 * accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The index needs to move between the configured and unconfigured
	 * lists when the UNCONFIGURED flag no longer matches the actual
	 * configuration state (flag set while now configured, or flag
	 * clear while no longer configured).
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5466
/* Handle the Set Public Address management command.
 *
 * Stores the address to be programmed into the controller on the next
 * power-on (via hdev->set_bdaddr) and, if this completes the device's
 * configuration, transitions it to the configured index list.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* Only drivers that can actually program the address support this */
	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* If setting the address completed configuration, move the
	 * device to the configured list and power it on for setup.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5518
/* Dispatch table for management commands, indexed by opcode.
 *
 * For each entry, data_len is the exact required payload size when
 * var_len is false, or the minimum size of a variable-length payload
 * when var_len is true. A NULL func marks an unassigned opcode.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5584
/* Entry point for all management commands received on a control socket.
 *
 * Copies the message from the iovec, validates the header, index and
 * payload length, resolves the target hci_dev (if any), and dispatches
 * to the handler table. Returns the number of consumed bytes on
 * success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length must account for the full message */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still in setup/config or claimed by a user
		 * channel are not visible through the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured devices only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and device-specific commands must carry one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5694
5695 void mgmt_index_added(struct hci_dev *hdev)
5696 {
5697 if (hdev->dev_type != HCI_BREDR)
5698 return;
5699
5700 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5701 return;
5702
5703 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5704 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5705 else
5706 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5707 }
5708
5709 void mgmt_index_removed(struct hci_dev *hdev)
5710 {
5711 u8 status = MGMT_STATUS_INVALID_INDEX;
5712
5713 if (hdev->dev_type != HCI_BREDR)
5714 return;
5715
5716 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5717 return;
5718
5719 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5720
5721 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5722 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5723 else
5724 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5725 }
5726
/* This function requires the caller holds hdev->lock.
 *
 * Re-populates the pending LE connect/report action lists from the
 * stored connection parameters after a power-on and kicks the
 * background scan. Needed because an AUTO_OFF "power off" may not
 * have really powered the controller down.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}

	hci_update_background_scan(hdev);
}
5752
/* Completion callback for the power-on HCI request built by
 * powered_update_hci(). Restarts LE auto-connect actions, answers all
 * pending Set Powered commands and emits New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp leaves a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);
}
5772
/* Build and run the HCI request that brings the controller state in
 * line with the current management settings after power-on (SSP, LE
 * host support, advertising, link security, scan mode, class, name
 * and EIR). Returns the hci_req_run() result; 0 means the request was
 * queued and powered_complete() will be called.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync authentication requirement with the controller */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5832
/* Notify the management interface of a controller power state change.
 *
 * On power-on, tries to bring the controller in sync via
 * powered_update_hci(); if that request was queued, replies are
 * deferred to powered_complete(). On power-off (or when no HCI work
 * was needed), pending commands are answered immediately and New
 * Settings is emitted.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* 0 means the request was queued; powered_complete()
		 * takes care of the replies.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* All other pending commands fail once the device is down */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5867
5868 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5869 {
5870 struct pending_cmd *cmd;
5871 u8 status;
5872
5873 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5874 if (!cmd)
5875 return;
5876
5877 if (err == -ERFKILL)
5878 status = MGMT_STATUS_RFKILLED;
5879 else
5880 status = MGMT_STATUS_FAILED;
5881
5882 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5883
5884 mgmt_pending_remove(cmd);
5885 }
5886
/* Timer callback for the discoverable timeout: drop out of discoverable
 * mode and bring the controller state back in sync.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Disable inquiry scan but keep page scan enabled so the
		 * device stays connectable.
		 */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Class of device and advertising data both encode the
	 * (limited) discoverable state, so refresh both.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5917
/* Track an externally observed change of the discoverable state and
 * notify user space when the mgmt setting actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable implies discoverable, so it must
		 * be cleared alongside the general flag.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5954
5955 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5956 {
5957 bool changed;
5958
5959 /* Nothing needed here if there's a pending command since that
5960 * commands request completion callback takes care of everything
5961 * necessary.
5962 */
5963 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5964 return;
5965
5966 /* Powering off may clear the scan mode - don't let that interfere */
5967 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5968 return;
5969
5970 if (connectable)
5971 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5972 else
5973 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5974
5975 if (changed)
5976 new_settings(hdev, NULL);
5977 }
5978
5979 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5980 {
5981 /* Powering off may stop advertising - don't let that interfere */
5982 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5983 return;
5984
5985 if (advertising)
5986 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5987 else
5988 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5989 }
5990
5991 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5992 {
5993 u8 mgmt_err = mgmt_status(status);
5994
5995 if (scan & SCAN_PAGE)
5996 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5997 cmd_status_rsp, &mgmt_err);
5998
5999 if (scan & SCAN_INQUIRY)
6000 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
6001 cmd_status_rsp, &mgmt_err);
6002 }
6003
6004 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6005 bool persistent)
6006 {
6007 struct mgmt_ev_new_link_key ev;
6008
6009 memset(&ev, 0, sizeof(ev));
6010
6011 ev.store_hint = persistent;
6012 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6013 ev.key.addr.type = BDADDR_BREDR;
6014 ev.key.type = key->type;
6015 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6016 ev.key.pin_len = key->pin_len;
6017
6018 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6019 }
6020
6021 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6022 {
6023 if (ltk->authenticated)
6024 return MGMT_LTK_AUTHENTICATED;
6025
6026 return MGMT_LTK_UNAUTHENTICATED;
6027 }
6028
/* Send a New Long Term Key event to user space, deciding whether the
 * key is worth storing based on the remote address type.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	/* Top two bits 11 mark a static random address; anything else
	 * under ADDR_LE_DEV_RANDOM is a private address.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Flag master (initiator) keys so user space can restore the role */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6066
6067 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6068 {
6069 struct mgmt_ev_new_irk ev;
6070
6071 memset(&ev, 0, sizeof(ev));
6072
6073 /* For identity resolving keys from devices that are already
6074 * using a public address or static random address, do not
6075 * ask for storing this key. The identity resolving key really
6076 * is only mandatory for devices using resovlable random
6077 * addresses.
6078 *
6079 * Storing all identity resolving keys has the downside that
6080 * they will be also loaded on next boot of they system. More
6081 * identity resolving keys, means more time during scanning is
6082 * needed to actually resolve these addresses.
6083 */
6084 if (bacmp(&irk->rpa, BDADDR_ANY))
6085 ev.store_hint = 0x01;
6086 else
6087 ev.store_hint = 0x00;
6088
6089 bacpy(&ev.rpa, &irk->rpa);
6090 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6091 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6092 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6093
6094 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6095 }
6096
/* Send a New Signature Resolving Key event, deciding whether the key
 * is worth storing based on the remote address type.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	/* Top two bits 11 mark a static random address; anything else
	 * under ADDR_LE_DEV_RANDOM is a private address.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
6126
6127 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6128 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6129 u16 max_interval, u16 latency, u16 timeout)
6130 {
6131 struct mgmt_ev_new_conn_param ev;
6132
6133 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6134 return;
6135
6136 memset(&ev, 0, sizeof(ev));
6137 bacpy(&ev.addr.bdaddr, bdaddr);
6138 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6139 ev.store_hint = store_hint;
6140 ev.min_interval = cpu_to_le16(min_interval);
6141 ev.max_interval = cpu_to_le16(max_interval);
6142 ev.latency = cpu_to_le16(latency);
6143 ev.timeout = cpu_to_le16(timeout);
6144
6145 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6146 }
6147
6148 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6149 u8 data_len)
6150 {
6151 eir[eir_len++] = sizeof(type) + data_len;
6152 eir[eir_len++] = type;
6153 memcpy(&eir[eir_len], data, data_len);
6154 eir_len += data_len;
6155
6156 return eir_len;
6157 }
6158
/* Send a Device Connected event, with the remote name and class of
 * device (when known) packed as EIR fields after the fixed header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* NOTE(review): no explicit bounds check against buf here —
	 * presumably name_len is already capped by the callers (e.g. at
	 * HCI_MAX_NAME_LENGTH) so header + name + CoD always fits in
	 * 512 bytes; confirm against callers before changing sizes.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only append the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6185
6186 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6187 {
6188 struct mgmt_cp_disconnect *cp = cmd->param;
6189 struct sock **sk = data;
6190 struct mgmt_rp_disconnect rp;
6191
6192 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6193 rp.addr.type = cp->addr.type;
6194
6195 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6196 sizeof(rp));
6197
6198 *sk = cmd->sk;
6199 sock_hold(*sk);
6200
6201 mgmt_pending_remove(cmd);
6202 }
6203
/* Complete one pending Unpair Device command: emit the Device Unpaired
 * event first, then the command response, then drop the pending entry.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Notify all other mgmt sockets before answering the requester */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6220
/* Handle a remote device disconnection: possibly expedite a pending
 * power-off, complete pending Disconnect/Unpair commands and broadcast
 * the Device Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			/* Last connection gone while powering off: run the
			 * power-off work immediately instead of waiting.
			 */
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Connections user space never saw get no disconnect event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp leaves a referenced socket in sk so the event
	 * below is not echoed back to the requester.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6262
6263 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6264 u8 link_type, u8 addr_type, u8 status)
6265 {
6266 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6267 struct mgmt_cp_disconnect *cp;
6268 struct mgmt_rp_disconnect rp;
6269 struct pending_cmd *cmd;
6270
6271 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6272 hdev);
6273
6274 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6275 if (!cmd)
6276 return;
6277
6278 cp = cmd->param;
6279
6280 if (bacmp(bdaddr, &cp->addr.bdaddr))
6281 return;
6282
6283 if (cp->addr.type != bdaddr_type)
6284 return;
6285
6286 bacpy(&rp.addr.bdaddr, bdaddr);
6287 rp.addr.type = bdaddr_type;
6288
6289 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6290 mgmt_status(status), &rp, sizeof(rp));
6291
6292 mgmt_pending_remove(cmd);
6293 }
6294
/* An outgoing connection attempt failed: possibly expedite a pending
 * power-off and broadcast the Connect Failed event.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	/* Same expedited power-off handling as mgmt_device_disconnected */
	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6320
6321 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6322 {
6323 struct mgmt_ev_pin_code_request ev;
6324
6325 bacpy(&ev.addr.bdaddr, bdaddr);
6326 ev.addr.type = BDADDR_BREDR;
6327 ev.secure = secure;
6328
6329 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6330 }
6331
6332 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6333 u8 status)
6334 {
6335 struct pending_cmd *cmd;
6336 struct mgmt_rp_pin_code_reply rp;
6337
6338 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6339 if (!cmd)
6340 return;
6341
6342 bacpy(&rp.addr.bdaddr, bdaddr);
6343 rp.addr.type = BDADDR_BREDR;
6344
6345 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6346 mgmt_status(status), &rp, sizeof(rp));
6347
6348 mgmt_pending_remove(cmd);
6349 }
6350
6351 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6352 u8 status)
6353 {
6354 struct pending_cmd *cmd;
6355 struct mgmt_rp_pin_code_reply rp;
6356
6357 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6358 if (!cmd)
6359 return;
6360
6361 bacpy(&rp.addr.bdaddr, bdaddr);
6362 rp.addr.type = BDADDR_BREDR;
6363
6364 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6365 mgmt_status(status), &rp, sizeof(rp));
6366
6367 mgmt_pending_remove(cmd);
6368 }
6369
6370 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6371 u8 link_type, u8 addr_type, u32 value,
6372 u8 confirm_hint)
6373 {
6374 struct mgmt_ev_user_confirm_request ev;
6375
6376 BT_DBG("%s", hdev->name);
6377
6378 bacpy(&ev.addr.bdaddr, bdaddr);
6379 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6380 ev.confirm_hint = confirm_hint;
6381 ev.value = cpu_to_le32(value);
6382
6383 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6384 NULL);
6385 }
6386
6387 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6388 u8 link_type, u8 addr_type)
6389 {
6390 struct mgmt_ev_user_passkey_request ev;
6391
6392 BT_DBG("%s", hdev->name);
6393
6394 bacpy(&ev.addr.bdaddr, bdaddr);
6395 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6396
6397 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6398 NULL);
6399 }
6400
6401 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6402 u8 link_type, u8 addr_type, u8 status,
6403 u8 opcode)
6404 {
6405 struct pending_cmd *cmd;
6406 struct mgmt_rp_user_confirm_reply rp;
6407 int err;
6408
6409 cmd = mgmt_pending_find(opcode, hdev);
6410 if (!cmd)
6411 return -ENOENT;
6412
6413 bacpy(&rp.addr.bdaddr, bdaddr);
6414 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6415 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6416 &rp, sizeof(rp));
6417
6418 mgmt_pending_remove(cmd);
6419
6420 return err;
6421 }
6422
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6429
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6437
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6444
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6452
6453 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6454 u8 link_type, u8 addr_type, u32 passkey,
6455 u8 entered)
6456 {
6457 struct mgmt_ev_passkey_notify ev;
6458
6459 BT_DBG("%s", hdev->name);
6460
6461 bacpy(&ev.addr.bdaddr, bdaddr);
6462 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6463 ev.passkey = __cpu_to_le32(passkey);
6464 ev.entered = entered;
6465
6466 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6467 }
6468
6469 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6470 u8 addr_type, u8 status)
6471 {
6472 struct mgmt_ev_auth_failed ev;
6473
6474 bacpy(&ev.addr.bdaddr, bdaddr);
6475 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6476 ev.status = mgmt_status(status);
6477
6478 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6479 }
6480
/* Completion handler for Write Auth Enable: sync the link security
 * mgmt setting with the controller state and answer pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail the pending commands; the mgmt
		 * setting stays untouched.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH flag into the mgmt setting,
	 * tracking whether this is an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6509
6510 static void clear_eir(struct hci_request *req)
6511 {
6512 struct hci_dev *hdev = req->hdev;
6513 struct hci_cp_write_eir cp;
6514
6515 if (!lmp_ext_inq_capable(hdev))
6516 return;
6517
6518 memset(hdev->eir, 0, sizeof(hdev->eir));
6519
6520 memset(&cp, 0, sizeof(cp));
6521
6522 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6523 }
6524
/* Completion handler for Write Simple Pairing Mode: sync the SSP (and
 * dependent High Speed) mgmt settings, answer pending Set SSP commands
 * and update or clear the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set SSP
		 * flag (and HS, which depends on SSP) and notify.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables HS; report a change if
		 * either flag was actually set.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR data is only valid while SSP is enabled */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6577
/* Completion handler for Secure Connections enable: sync the SC (and
 * SC-only) mgmt settings and answer pending Set Secure Connections
 * commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set SC
		 * flags and notify if the setting actually changed.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot survive SC being disabled */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6614
6615 static void sk_lookup(struct pending_cmd *cmd, void *data)
6616 {
6617 struct cmd_lookup *match = data;
6618
6619 if (match->sk == NULL) {
6620 match->sk = cmd->sk;
6621 sock_hold(match->sk);
6622 }
6623 }
6624
/* Completion handler for a class of device update triggered by Set
 * Device Class, Add UUID or Remove UUID.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Grab the socket of whichever command triggered the update so
	 * it can be excluded from the broadcast below; the lookup order
	 * determines which socket wins when several are pending.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
6641
/* Completion handler for a local name change: broadcast the new name,
 * skipping the socket that requested it (if any).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the change came from the HCI
		 * side, so cache the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6668
/* Completion handler for Read Local OOB Data: answer the pending
 * command, using the extended (192+256 bit) response format when
 * Secure Connections data is available.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* Extended response only when SC is enabled and the
		 * controller delivered the 256-bit values.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6715
/* Send a Device Found event for a device seen during discovery or
 * passive scanning, packing EIR, class of device and scan response
 * data after the fixed header.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR data already
	 * carries one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data follows directly after the EIR data */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6763
6764 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6765 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6766 {
6767 struct mgmt_ev_device_found *ev;
6768 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6769 u16 eir_len;
6770
6771 ev = (struct mgmt_ev_device_found *) buf;
6772
6773 memset(buf, 0, sizeof(buf));
6774
6775 bacpy(&ev->addr.bdaddr, bdaddr);
6776 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6777 ev->rssi = rssi;
6778
6779 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6780 name_len);
6781
6782 ev->eir_len = cpu_to_le16(eir_len);
6783
6784 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6785 }
6786
6787 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6788 {
6789 struct mgmt_ev_discovering ev;
6790 struct pending_cmd *cmd;
6791
6792 BT_DBG("%s discovering %u", hdev->name, discovering);
6793
6794 if (discovering)
6795 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6796 else
6797 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6798
6799 if (cmd != NULL) {
6800 u8 type = hdev->discovery.type;
6801
6802 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6803 sizeof(type));
6804 mgmt_pending_remove(cmd);
6805 }
6806
6807 memset(&ev, 0, sizeof(ev));
6808 ev.type = hdev->discovery.type;
6809 ev.discovering = discovering;
6810
6811 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6812 }
6813
6814 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6815 {
6816 BT_DBG("%s status %u", hdev->name, status);
6817
6818 /* Clear the advertising mgmt setting if we failed to re-enable it */
6819 if (status) {
6820 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6821 new_settings(hdev, NULL);
6822 }
6823 }
6824
/* Re-enable advertising (e.g. after the last LE connection went away)
 * if the mgmt advertising setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising is not re-enabled while LE connections exist */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}