/* Source: net/bluetooth/mgmt.c
 * Mirror: git.proxmox.com, mirror_ubuntu-zesty-kernel.git
 * At commit: "Bluetooth: Add support for adding remote OOB data for LE"
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Opcodes advertised to user space via MGMT_OP_READ_COMMANDS
 * (see read_commands(), which serializes this table verbatim).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Events advertised to user space via MGMT_OP_READ_COMMANDS
 * (serialized after mgmt_commands[] by read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
131
/* Book-keeping for a mgmt command that is still waiting for its HCI
 * completion. Created by mgmt_pending_add(), freed by
 * mgmt_pending_remove()/mgmt_pending_free().
 */
struct pending_cmd {
	struct list_head list;	/* node in hdev->mgmt_pending */
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index (hdev->id) */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* socket that issued the command (ref held) */
	void *user_data;	/* handler-private context pointer */
};
140
141 /* HCI to MGMT error code conversion table */
142 static u8 mgmt_status_table[] = {
143 MGMT_STATUS_SUCCESS,
144 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
145 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
146 MGMT_STATUS_FAILED, /* Hardware Failure */
147 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
148 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
149 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
150 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
151 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
153 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
154 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
155 MGMT_STATUS_BUSY, /* Command Disallowed */
156 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
157 MGMT_STATUS_REJECTED, /* Rejected Security */
158 MGMT_STATUS_REJECTED, /* Rejected Personal */
159 MGMT_STATUS_TIMEOUT, /* Host Timeout */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
161 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
162 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
163 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
164 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
165 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
166 MGMT_STATUS_BUSY, /* Repeated Attempts */
167 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
168 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
169 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
170 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
171 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
172 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
173 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
174 MGMT_STATUS_FAILED, /* Unspecified Error */
175 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
176 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
177 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
178 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
179 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
180 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
181 MGMT_STATUS_FAILED, /* Unit Link Key Used */
182 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
183 MGMT_STATUS_TIMEOUT, /* Instant Passed */
184 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
185 MGMT_STATUS_FAILED, /* Transaction Collision */
186 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
187 MGMT_STATUS_REJECTED, /* QoS Rejected */
188 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
189 MGMT_STATUS_REJECTED, /* Insufficient Security */
190 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
191 MGMT_STATUS_BUSY, /* Role Switch Pending */
192 MGMT_STATUS_FAILED, /* Slot Violation */
193 MGMT_STATUS_FAILED, /* Role Switch Failed */
194 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
195 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
196 MGMT_STATUS_BUSY, /* Host Busy Pairing */
197 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
198 MGMT_STATUS_BUSY, /* Controller Busy */
199 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
200 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
201 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
202 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
203 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
204 };
205
206 static u8 mgmt_status(u8 hci_status)
207 {
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
210
211 return MGMT_STATUS_FAILED;
212 }
213
214 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
215 struct sock *skip_sk)
216 {
217 struct sk_buff *skb;
218 struct mgmt_hdr *hdr;
219
220 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
221 if (!skb)
222 return -ENOMEM;
223
224 hdr = (void *) skb_put(skb, sizeof(*hdr));
225 hdr->opcode = cpu_to_le16(event);
226 if (hdev)
227 hdr->index = cpu_to_le16(hdev->id);
228 else
229 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
230 hdr->len = cpu_to_le16(data_len);
231
232 if (data)
233 memcpy(skb_put(skb, data_len), data, data_len);
234
235 /* Time stamp */
236 __net_timestamp(skb);
237
238 hci_send_to_control(skb, skip_sk);
239 kfree_skb(skb);
240
241 return 0;
242 }
243
244 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
245 {
246 struct sk_buff *skb;
247 struct mgmt_hdr *hdr;
248 struct mgmt_ev_cmd_status *ev;
249 int err;
250
251 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
252
253 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
254 if (!skb)
255 return -ENOMEM;
256
257 hdr = (void *) skb_put(skb, sizeof(*hdr));
258
259 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
260 hdr->index = cpu_to_le16(index);
261 hdr->len = cpu_to_le16(sizeof(*ev));
262
263 ev = (void *) skb_put(skb, sizeof(*ev));
264 ev->status = status;
265 ev->opcode = cpu_to_le16(cmd);
266
267 err = sock_queue_rcv_skb(sk, skb);
268 if (err < 0)
269 kfree_skb(skb);
270
271 return err;
272 }
273
274 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
275 void *rp, size_t rp_len)
276 {
277 struct sk_buff *skb;
278 struct mgmt_hdr *hdr;
279 struct mgmt_ev_cmd_complete *ev;
280 int err;
281
282 BT_DBG("sock %p", sk);
283
284 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
285 if (!skb)
286 return -ENOMEM;
287
288 hdr = (void *) skb_put(skb, sizeof(*hdr));
289
290 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
291 hdr->index = cpu_to_le16(index);
292 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
293
294 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
295 ev->opcode = cpu_to_le16(cmd);
296 ev->status = status;
297
298 if (rp)
299 memcpy(ev->data, rp, rp_len);
300
301 err = sock_queue_rcv_skb(sk, skb);
302 if (err < 0)
303 kfree_skb(skb);
304
305 return err;
306 }
307
308 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
309 u16 data_len)
310 {
311 struct mgmt_rp_read_version rp;
312
313 BT_DBG("sock %p", sk);
314
315 rp.version = MGMT_VERSION;
316 rp.revision = cpu_to_le16(MGMT_REVISION);
317
318 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
319 sizeof(rp));
320 }
321
322 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
323 u16 data_len)
324 {
325 struct mgmt_rp_read_commands *rp;
326 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
327 const u16 num_events = ARRAY_SIZE(mgmt_events);
328 __le16 *opcode;
329 size_t rp_size;
330 int i, err;
331
332 BT_DBG("sock %p", sk);
333
334 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
335
336 rp = kmalloc(rp_size, GFP_KERNEL);
337 if (!rp)
338 return -ENOMEM;
339
340 rp->num_commands = cpu_to_le16(num_commands);
341 rp->num_events = cpu_to_le16(num_events);
342
343 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
344 put_unaligned_le16(mgmt_commands[i], opcode);
345
346 for (i = 0; i < num_events; i++, opcode++)
347 put_unaligned_le16(mgmt_events[i], opcode);
348
349 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
350 rp_size);
351 kfree(rp);
352
353 return err;
354 }
355
/* MGMT_OP_READ_INDEX_LIST: report the ids of all configured BR/EDR
 * controllers. Uses a two-pass walk of hci_dev_list: first to size the
 * reply buffer, then to fill it.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reportable devices */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass applies stricter filters, so it may select fewer
	 * entries than the first pass sized for.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute with the final count so the reply is exact */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
415
/* MGMT_OP_READ_UNCONF_INDEX_LIST: same two-pass scheme as
 * read_index_list(), but selecting the controllers that still have
 * HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reportable devices */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass applies stricter filters; may select fewer entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute with the final count so the reply is exact */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
475
476 static bool is_configured(struct hci_dev *hdev)
477 {
478 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
479 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
480 return false;
481
482 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
483 !bacmp(&hdev->public_addr, BDADDR_ANY))
484 return false;
485
486 return true;
487 }
488
489 static __le32 get_missing_options(struct hci_dev *hdev)
490 {
491 u32 options = 0;
492
493 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
494 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
495 options |= MGMT_OPTION_EXTERNAL_CONFIG;
496
497 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
498 !bacmp(&hdev->public_addr, BDADDR_ANY))
499 options |= MGMT_OPTION_PUBLIC_ADDRESS;
500
501 return cpu_to_le32(options);
502 }
503
504 static int new_options(struct hci_dev *hdev, struct sock *skip)
505 {
506 __le32 options = get_missing_options(hdev);
507
508 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
509 sizeof(options), skip);
510 }
511
512 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
513 {
514 __le32 options = get_missing_options(hdev);
515
516 return cmd_complete(sk, hdev->id, opcode, 0, &options,
517 sizeof(options));
518 }
519
520 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
521 void *data, u16 data_len)
522 {
523 struct mgmt_rp_read_config_info rp;
524 u32 options = 0;
525
526 BT_DBG("sock %p %s", sk, hdev->name);
527
528 hci_dev_lock(hdev);
529
530 memset(&rp, 0, sizeof(rp));
531 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
532
533 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
534 options |= MGMT_OPTION_EXTERNAL_CONFIG;
535
536 if (hdev->set_bdaddr)
537 options |= MGMT_OPTION_PUBLIC_ADDRESS;
538
539 rp.supported_options = cpu_to_le32(options);
540 rp.missing_options = get_missing_options(hdev);
541
542 hci_dev_unlock(hdev);
543
544 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
545 sizeof(rp));
546 }
547
/* Compute the MGMT_SETTING_* bits this controller could support,
 * derived from its LMP capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs HCI 1.2 or later */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC allows testing SC on controllers that
		 * do not advertise the capability themselves.
		 */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
587
/* Compute the MGMT_SETTING_* bits that are currently active, mapping
 * each relevant dev_flags bit to its mgmt setting.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
636
637 #define PNP_INFO_SVCLASS_ID 0x1200
638
/* Append an EIR structure listing the registered 16-bit service UUIDs
 * to @data (at most @len bytes). Returns a pointer just past the last
 * byte written (== @data if nothing fit).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias sits at bytes 12-13 of the stored
		 * 128-bit value.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is advertised via EIR_DEVICE_ID instead */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the header once the first UUID qualifies.
		 * The length byte starts at 1 (type byte only) and grows
		 * as UUIDs are appended.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
680
/* Append an EIR structure listing the registered 32-bit service UUIDs
 * to @data (at most @len bytes). Returns a pointer just past the last
 * byte written.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the header once the first UUID qualifies */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* 32-bit portion lives at bytes 12-15 of the stored value */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
713
/* Append an EIR structure listing the registered 128-bit service UUIDs
 * to @data (at most @len bytes). Returns a pointer just past the last
 * byte written.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one full 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the header once the first UUID qualifies */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
746
747 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
748 {
749 struct pending_cmd *cmd;
750
751 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
752 if (cmd->opcode == opcode)
753 return cmd;
754 }
755
756 return NULL;
757 }
758
759 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
760 struct hci_dev *hdev,
761 const void *data)
762 {
763 struct pending_cmd *cmd;
764
765 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
766 if (cmd->user_data != data)
767 continue;
768 if (cmd->opcode == opcode)
769 return cmd;
770 }
771
772 return NULL;
773 }
774
/* Build the LE scan response payload (the local name, shortened if it
 * does not fit) into @ptr. Returns the number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining budget minus the 2-byte AD header
		 * (length + type).
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
800
/* Queue an LE Set Scan Response Data command if the payload has
 * changed since the last update. No-op while LE is disabled.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Avoid a redundant HCI command when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
825
826 static u8 get_adv_discov_flags(struct hci_dev *hdev)
827 {
828 struct pending_cmd *cmd;
829
830 /* If there's a pending mgmt command the flags will not yet have
831 * their final values, so check for this first.
832 */
833 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
834 if (cmd) {
835 struct mgmt_mode *cp = cmd->param;
836 if (cp->val == 0x01)
837 return LE_AD_GENERAL;
838 else if (cp->val == 0x02)
839 return LE_AD_LIMITED;
840 } else {
841 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
842 return LE_AD_LIMITED;
843 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_GENERAL;
845 }
846
847 return 0;
848 }
849
850 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
851 {
852 u8 ad_len = 0, flags = 0;
853
854 flags |= get_adv_discov_flags(hdev);
855
856 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
857 flags |= LE_AD_NO_BREDR;
858
859 if (flags) {
860 BT_DBG("adv flags 0x%02x", flags);
861
862 ptr[0] = 2;
863 ptr[1] = EIR_FLAGS;
864 ptr[2] = flags;
865
866 ad_len += 3;
867 ptr += 3;
868 }
869
870 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
871 ptr[0] = 2;
872 ptr[1] = EIR_TX_POWER;
873 ptr[2] = (u8) hdev->adv_tx_power;
874
875 ad_len += 3;
876 ptr += 3;
877 }
878
879 return ad_len;
880 }
881
/* Queue an LE Set Advertising Data command if the payload has changed
 * since the last update. No-op while LE is disabled.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Avoid a redundant HCI command when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
906
/* Refresh the LE advertising data immediately by running a one-off
 * HCI request. Returns the hci_req_run() result.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
916
/* Build the Extended Inquiry Response payload into @data: local name,
 * inquiry TX power, Device ID record and the registered service UUID
 * lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		/* TX power structure: length, type, value */
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID structure: 4 little-endian u16 fields */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill any remaining space with the registered service UUIDs */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
964
965 static void update_eir(struct hci_request *req)
966 {
967 struct hci_dev *hdev = req->hdev;
968 struct hci_cp_write_eir cp;
969
970 if (!hdev_is_powered(hdev))
971 return;
972
973 if (!lmp_ext_inq_capable(hdev))
974 return;
975
976 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
977 return;
978
979 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
980 return;
981
982 memset(&cp, 0, sizeof(cp));
983
984 create_eir(hdev, cp.data);
985
986 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
987 return;
988
989 memcpy(hdev->eir, cp.data, sizeof(cp.data));
990
991 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
992 }
993
994 static u8 get_service_classes(struct hci_dev *hdev)
995 {
996 struct bt_uuid *uuid;
997 u8 val = 0;
998
999 list_for_each_entry(uuid, &hdev->uuids, list)
1000 val |= uuid->svc_hint;
1001
1002 return val;
1003 }
1004
1005 static void update_class(struct hci_request *req)
1006 {
1007 struct hci_dev *hdev = req->hdev;
1008 u8 cod[3];
1009
1010 BT_DBG("%s", hdev->name);
1011
1012 if (!hdev_is_powered(hdev))
1013 return;
1014
1015 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1016 return;
1017
1018 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1019 return;
1020
1021 cod[0] = hdev->minor_class;
1022 cod[1] = hdev->major_class;
1023 cod[2] = get_service_classes(hdev);
1024
1025 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1026 cod[1] |= 0x20;
1027
1028 if (memcmp(cod, hdev->dev_class, 3) == 0)
1029 return;
1030
1031 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1032 }
1033
1034 static bool get_connectable(struct hci_dev *hdev)
1035 {
1036 struct pending_cmd *cmd;
1037
1038 /* If there's a pending mgmt command the flag will not yet have
1039 * it's final value, so check for this first.
1040 */
1041 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1042 if (cmd) {
1043 struct mgmt_mode *cp = cmd->param;
1044 return cp->val;
1045 }
1046
1047 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1048 }
1049
1050 static void disable_advertising(struct hci_request *req)
1051 {
1052 u8 enable = 0x00;
1053
1054 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1055 }
1056
/* Queue the HCI command sequence that (re-)enables LE advertising:
 * parameter setup followed by Set Advertise Enable. Bails out early
 * when LE connections exist or a suitable own address cannot be set.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restart advertising if it is currently active */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1097
/* Delayed work: when the service cache period ends, flush the
 * accumulated EIR and class-of-device updates to the controller in a
 * single HCI request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act on the transition from cached to uncached */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1118
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is on, restart it so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1139
/* One-time mgmt initialization for a controller. The HCI_MGMT bit
 * guards against repeated setup when multiple sockets issue commands.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1155
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157 void *data, u16 data_len)
1158 {
1159 struct mgmt_rp_read_info rp;
1160
1161 BT_DBG("sock %p %s", sk, hdev->name);
1162
1163 hci_dev_lock(hdev);
1164
1165 memset(&rp, 0, sizeof(rp));
1166
1167 bacpy(&rp.bdaddr, &hdev->bdaddr);
1168
1169 rp.version = hdev->hci_ver;
1170 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1171
1172 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1174
1175 memcpy(rp.dev_class, hdev->dev_class, 3);
1176
1177 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1179
1180 hci_dev_unlock(hdev);
1181
1182 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1183 sizeof(rp));
1184 }
1185
/* Release a pending command: drop the socket reference taken in
 * mgmt_pending_add() and free the parameter copy and the command
 * itself. The caller must already have unlinked it from the list.
 */
static void mgmt_pending_free(struct pending_cmd *cmd)
{
	sock_put(cmd->sk);
	kfree(cmd->param);
	kfree(cmd);
}
1192
1193 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1194 struct hci_dev *hdev, void *data,
1195 u16 len)
1196 {
1197 struct pending_cmd *cmd;
1198
1199 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1200 if (!cmd)
1201 return NULL;
1202
1203 cmd->opcode = opcode;
1204 cmd->index = hdev->id;
1205
1206 cmd->param = kmalloc(len, GFP_KERNEL);
1207 if (!cmd->param) {
1208 kfree(cmd);
1209 return NULL;
1210 }
1211
1212 if (data)
1213 memcpy(cmd->param, data, len);
1214
1215 cmd->sk = sk;
1216 sock_hold(sk);
1217
1218 list_add(&cmd->list, &hdev->mgmt_pending);
1219
1220 return cmd;
1221 }
1222
1223 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1224 void (*cb)(struct pending_cmd *cmd,
1225 void *data),
1226 void *data)
1227 {
1228 struct pending_cmd *cmd, *tmp;
1229
1230 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1231 if (opcode > 0 && cmd->opcode != opcode)
1232 continue;
1233
1234 cb(cmd, data);
1235 }
1236 }
1237
/* Unlink a pending command from its list and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1243
/* Send a successful command-complete for @opcode carrying the current
 * settings bitmask (little endian) as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1251
/* Completion handler for clean_up_hci_state(): once the teardown
 * request has run and no connections remain, fast-forward the pending
 * power-off work instead of waiting for its timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1261
/* Queue the HCI commands needed to abort whatever discovery activity
 * is in progress (inquiry, LE scan or remote name resolution).
 *
 * Returns true if at least one stop command was added to @req, false
 * if there was nothing to stop.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE discovery: stop the timed disable work and
			 * turn scanning off immediately.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the name request for whichever entry is still
		 * waiting for resolution, if any.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1303
/* Prepare the controller for power-off: disable page/inquiry scan and
 * advertising, abort discovery, and disconnect, cancel or reject every
 * known connection.
 *
 * Returns the result of running the assembled HCI request; -ENODATA
 * means no commands were queued (nothing to clean up).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: disconnect it. */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt still in flight:
			 * cancel with the transport-specific command.
			 */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection not yet accepted: reject. */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1362
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 *
 * Returns 0 or a negative errno; the mgmt reply/status goes to @sk.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power change may be in flight at a time. */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the device was about to auto-power-off, cancel that; a
	 * power-on request can then complete via mgmt_powered()
	 * directly since the controller is still up.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: just reply with settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1428
/* Broadcast a New Settings event carrying the current settings bitmask
 * to all mgmt sockets except @skip (which may be NULL).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev;

	ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
}
1437
/* Exported wrapper: emit New Settings to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1442
/* Context handed to mgmt_pending_foreach() callbacks that answer
 * pending commands and remember one socket for follow-up events.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (extra ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1448
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free the entry. The first socket seen is
 * stashed in match->sk with an extra reference so the caller can send
 * follow-up events (the caller must sock_put() it).
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* Free directly; the entry was already unlinked above. */
	mgmt_pending_free(cmd);
}
1464
/* mgmt_pending_foreach() callback: fail a pending command with the
 * mgmt status pointed to by @data and remove the entry.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1472
1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 {
1475 if (!lmp_bredr_capable(hdev))
1476 return MGMT_STATUS_NOT_SUPPORTED;
1477 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1478 return MGMT_STATUS_REJECTED;
1479 else
1480 return MGMT_STATUS_SUCCESS;
1481 }
1482
1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 {
1485 if (!lmp_le_capable(hdev))
1486 return MGMT_STATUS_NOT_SUPPORTED;
1487 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1488 return MGMT_STATUS_REJECTED;
1489 else
1490 return MGMT_STATUS_SUCCESS;
1491 }
1492
/* Request-complete handler for set_discoverable(): commit the flag
 * change, arm the discoverable timeout and answer the pending command.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* set_discoverable() set the limited bit optimistically;
		 * roll it back on failure.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout that was stored by set_discoverable(). */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	hci_update_page_scan(hdev, &req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1551
/* MGMT_OP_SET_DISCOVERABLE handler. cp->val is 0x00 (off), 0x01
 * (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). Flag changes are committed in
 * set_discoverable_complete() once the HCI request succeeds.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1716
/* Queue page scan parameter changes making the device fast connectable
 * (interlaced scan, short interval) or restoring the standard
 * defaults. No-op when BR/EDR is disabled or the controller predates
 * Bluetooth 1.2; commands are only added when values actually differ.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan activity/type commands require Bluetooth 1.2+. */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1751
/* Request-complete handler for set_connectable(): commit the flag
 * changes, answer the pending command and trigger the follow-up page
 * scan / advertising / background scan updates.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Dropping connectable also drops discoverable. */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev, NULL);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1800
1801 static int set_connectable_update_settings(struct hci_dev *hdev,
1802 struct sock *sk, u8 val)
1803 {
1804 bool changed = false;
1805 int err;
1806
1807 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1808 changed = true;
1809
1810 if (val) {
1811 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1812 } else {
1813 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1814 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1815 }
1816
1817 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1818 if (err < 0)
1819 return err;
1820
1821 if (changed) {
1822 hci_update_page_scan(hdev, NULL);
1823 hci_update_background_scan(hdev);
1824 return new_settings(hdev, sk);
1825 }
1826
1827 return 0;
1828 }
1829
/* MGMT_OP_SET_CONNECTABLE handler: enable or disable incoming
 * connections. Flag changes are committed in
 * set_connectable_complete(); when powered off (or no HCI commands
 * are needed) the settings-only path is used instead.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing to send; fall back to flags-only. */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1934
1935 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1936 u16 len)
1937 {
1938 struct mgmt_mode *cp = data;
1939 bool changed;
1940 int err;
1941
1942 BT_DBG("request for %s", hdev->name);
1943
1944 if (cp->val != 0x00 && cp->val != 0x01)
1945 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1946 MGMT_STATUS_INVALID_PARAMS);
1947
1948 hci_dev_lock(hdev);
1949
1950 if (cp->val)
1951 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1952 else
1953 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1954
1955 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1956 if (err < 0)
1957 goto unlock;
1958
1959 if (changed)
1960 err = new_settings(hdev, sk);
1961
1962 unlock:
1963 hci_dev_unlock(hdev);
1964 return err;
1965 }
1966
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication
 * (HCI Write Authentication Enable). When powered off only the flag
 * is changed; otherwise the result is delivered once the HCI command
 * completes.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state: reply now. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2036
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. When powered
 * off only flags are changed (disabling SSP also clears High Speed);
 * otherwise HCI Write Simple Pairing Mode is sent and the result is
 * delivered on command complete.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP drops HS as well; "changed" is
			 * true if either flag was previously set.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP while debug keys are in use also turns off SSP
	 * debug mode (cp->val is 0x00 here, matching "disable").
	 */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2118
/* MGMT_OP_SET_HS handler: toggle High Speed support. This is a pure
 * flag change; it requires SSP to be enabled, and disabling HS is
 * rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Turning HS off is only allowed while powered down. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2169
/* Request-complete handler for set_le(): answer all pending SET_LE
 * commands (failing them on error) and, when LE ended up enabled,
 * refresh advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held an extra reference on the first socket. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2209
/* MGMT_OP_SET_LE handler: toggle LE host support via HCI Write LE Host
 * Supported. When powered off, or when the controller already matches
 * the requested state, only flags are changed; disabling LE also clears
 * the advertising flag and stops active advertising.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implies disabling advertising too. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before turning LE off. */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2298
2299 /* This is a helper function to test for pending mgmt commands that can
2300 * cause CoD or EIR HCI commands. We can only allow one such pending
2301 * mgmt command at a time since otherwise we cannot easily track what
2302 * the current values are, will be, and based on that calculate if a new
2303 * HCI command needs to be sent and if yes with what value.
2304 */
2305 static bool pending_eir_or_class(struct hci_dev *hdev)
2306 {
2307 struct pending_cmd *cmd;
2308
2309 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2310 switch (cmd->opcode) {
2311 case MGMT_OP_ADD_UUID:
2312 case MGMT_OP_REMOVE_UUID:
2313 case MGMT_OP_SET_DEV_CLASS:
2314 case MGMT_OP_SET_POWERED:
2315 return true;
2316 }
2317 }
2318
2319 return false;
2320 }
2321
/* Bluetooth Base UUID in little-endian byte order; bytes 12-15 carry
 * the shortened 16/32-bit UUID value (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2326
2327 static u8 get_uuid_size(const u8 *uuid)
2328 {
2329 u32 val;
2330
2331 if (memcmp(uuid, bluetooth_base_uuid, 12))
2332 return 128;
2333
2334 val = get_unaligned_le32(&uuid[12]);
2335 if (val > 0xffff)
2336 return 32;
2337
2338 return 16;
2339 }
2340
/* Shared completion for the UUID/class changing commands: reply to
 * the pending mgmt command for @mgmt_op with the current class of
 * device as payload and remove the entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2359
/* HCI request callback for add_uuid(): complete the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2366
/* Handle the MGMT_OP_ADD_UUID command: record a new service UUID and
 * schedule Class of Device and EIR updates on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means the request queued no HCI commands, so
		 * there is nothing to wait for: complete immediately with
		 * the current (3 byte) device class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred until add_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2424
2425 static bool enable_service_cache(struct hci_dev *hdev)
2426 {
2427 if (!hdev_is_powered(hdev))
2428 return false;
2429
2430 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2431 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2432 CACHE_TIMEOUT);
2433 return true;
2434 }
2435
2436 return false;
2437 }
2438
/* HCI request callback for Remove UUID: finish the pending mgmt command. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2445
/* Handle the MGMT_OP_REMOVE_UUID command. An all-zero UUID acts as a
 * wildcard and removes every recorded UUID; otherwise all instances of
 * the given UUID are removed. Class of Device and EIR data are then
 * updated on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: clear every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If service caching kicked in, the HCI update is
		 * deferred to the delayed work, so the command can
		 * complete right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every matching entry (duplicates are possible) */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA means no HCI commands were queued, so there
		 * is nothing to wait for and the command completes now.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until remove_uuid_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2523
/* HCI request callback for Set Device Class: finish the pending command. */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2530
/* Handle the MGMT_OP_SET_DEV_CLASS command: store the major/minor
 * device class and, when powered, push the new class (and possibly
 * EIR data) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved
	 * and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just record the values; they are applied on
	 * power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock around the synchronous cancel —
		 * NOTE(review): presumably the service_cache work takes
		 * the hdev lock itself; confirm against its handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred until set_class_complete() runs */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2601
/* Handle the MGMT_OP_LOAD_LINK_KEYS command: replace all stored BR/EDR
 * link keys with the supplied list and update the "keep debug keys"
 * policy flag.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count so that expected_len below cannot
	 * overflow its u16 type.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must contain exactly key_count key entries */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate all entries before touching any stored state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Emit New Settings only if the flag actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2683
2684 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2685 u8 addr_type, struct sock *skip_sk)
2686 {
2687 struct mgmt_ev_device_unpaired ev;
2688
2689 bacpy(&ev.addr.bdaddr, bdaddr);
2690 ev.addr.type = addr_type;
2691
2692 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2693 skip_sk);
2694 }
2695
/* Handle the MGMT_OP_UNPAIR_DEVICE command: remove all stored pairing
 * data (link key, or IRK plus LTK) for the given address and, when
 * requested, disconnect the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a strict boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map the mgmt address type to the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* A negative err from key removal means there was no pairing */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2806
/* Handle the MGMT_OP_DISCONNECT command: terminate the BR/EDR or LE
 * connection to the given address. The response is deferred until the
 * disconnect completes (or fails to be issued).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only a single Disconnect command may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to terminate */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2867
2868 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2869 {
2870 switch (link_type) {
2871 case LE_LINK:
2872 switch (addr_type) {
2873 case ADDR_LE_DEV_PUBLIC:
2874 return BDADDR_LE_PUBLIC;
2875
2876 default:
2877 /* Fallback to LE Random address type */
2878 return BDADDR_LE_RANDOM;
2879 }
2880
2881 default:
2882 /* Fallback to BR/EDR type */
2883 return BDADDR_BREDR;
2884 }
2885 }
2886
/* Handle the MGMT_OP_GET_CONNECTIONS command: report the addresses of
 * all mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO entries are
	 * written but then dropped by not advancing the index, so the
	 * next accepted connection overwrites them.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2944
/* Send an HCI PIN Code Negative Reply for the given address, tracking
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command so the eventual
 * HCI completion can be matched back to the requesting socket.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2963
/* Handle the MGMT_OP_PIN_CODE_REPLY command: forward a user-provided
 * PIN code to the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3023
3024 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3025 u16 len)
3026 {
3027 struct mgmt_cp_set_io_capability *cp = data;
3028
3029 BT_DBG("");
3030
3031 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3032 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3033 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3034
3035 hci_dev_lock(hdev);
3036
3037 hdev->io_capability = cp->io_capability;
3038
3039 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3040 hdev->io_capability);
3041
3042 hci_dev_unlock(hdev);
3043
3044 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3045 0);
3046 }
3047
3048 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3049 {
3050 struct hci_dev *hdev = conn->hdev;
3051 struct pending_cmd *cmd;
3052
3053 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3054 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3055 continue;
3056
3057 if (cmd->user_data != conn)
3058 continue;
3059
3060 return cmd;
3061 }
3062
3063 return NULL;
3064 }
3065
3066 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3067 {
3068 struct mgmt_rp_pair_device rp;
3069 struct hci_conn *conn = cmd->user_data;
3070
3071 bacpy(&rp.addr.bdaddr, &conn->dst);
3072 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3073
3074 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3075 &rp, sizeof(rp));
3076
3077 /* So we don't get further callbacks for this connection */
3078 conn->connect_cfm_cb = NULL;
3079 conn->security_cfm_cb = NULL;
3080 conn->disconn_cfm_cb = NULL;
3081
3082 hci_conn_drop(conn);
3083 hci_conn_put(conn);
3084
3085 mgmt_pending_remove(cmd);
3086
3087 /* The device is paired so there is no need to remove
3088 * its connection parameters anymore.
3089 */
3090 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3091 }
3092
3093 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3094 {
3095 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3096 struct pending_cmd *cmd;
3097
3098 cmd = find_pairing(conn);
3099 if (cmd)
3100 pairing_complete(cmd, status);
3101 }
3102
3103 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3104 {
3105 struct pending_cmd *cmd;
3106
3107 BT_DBG("status %u", status);
3108
3109 cmd = find_pairing(conn);
3110 if (!cmd)
3111 BT_DBG("Unable to find a pending command");
3112 else
3113 pairing_complete(cmd, mgmt_status(status));
3114 }
3115
3116 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3117 {
3118 struct pending_cmd *cmd;
3119
3120 BT_DBG("status %u", status);
3121
3122 if (!status)
3123 return;
3124
3125 cmd = find_pairing(conn);
3126 if (!cmd)
3127 BT_DBG("Unable to find a pending command");
3128 else
3129 pairing_complete(cmd, mgmt_status(status));
3130 }
3131
/* Handle the MGMT_OP_PAIR_DEVICE command: initiate a BR/EDR or LE
 * connection to the given address and drive the pairing through the
 * connection callbacks. The response is deferred until pairing
 * completes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means some other pairing or
	 * connection flow already owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a reference for the pending command's user_data */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, finish now */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3250
/* Handle the MGMT_OP_CANCEL_PAIR_DEVICE command: abort the pending
 * Pair Device command for the given address with status Cancelled.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the connection the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3292
/* Common handler for user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives). LE responses are
 * routed to SMP directly; BR/EDR responses are forwarded to the
 * controller via the given HCI opcode.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing is handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3360
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3372
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation
 * request. Unlike its siblings this one validates the payload length.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3388
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation
 * request.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3400
/* Handle MGMT_OP_USER_PASSKEY_REPLY: provide the passkey entered by
 * the user for an ongoing pairing.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3412
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3424
/* Queue a Write Local Name HCI command with the current device name. */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3434
/* HCI request callback for Set Local Name: respond to the pending
 * mgmt command with the name that was requested (echoed from the
 * stored command parameters).
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3462
/* Handle the MGMT_OP_SET_LOCAL_NAME command: store the new long and
 * short names and, when powered, push the name into EIR and LE scan
 * response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, only store the names and emit the Local
	 * Name Changed event; the controller is updated on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3531
/* Handle the MGMT_OP_READ_LOCAL_OOB_DATA command: ask the controller
 * for its local out-of-band pairing data (extended variant when
 * BR/EDR Secure Connections is enabled). The response is deferred
 * until the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may be pending */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3579
3580 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3581 void *data, u16 len)
3582 {
3583 int err;
3584
3585 BT_DBG("%s ", hdev->name);
3586
3587 hci_dev_lock(hdev);
3588
3589 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3590 struct mgmt_cp_add_remote_oob_data *cp = data;
3591 u8 status;
3592
3593 if (cp->addr.type != BDADDR_BREDR) {
3594 err = cmd_complete(sk, hdev->id,
3595 MGMT_OP_ADD_REMOTE_OOB_DATA,
3596 MGMT_STATUS_INVALID_PARAMS,
3597 &cp->addr, sizeof(cp->addr));
3598 goto unlock;
3599 }
3600
3601 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3602 cp->addr.type, cp->hash,
3603 cp->rand, NULL, NULL);
3604 if (err < 0)
3605 status = MGMT_STATUS_FAILED;
3606 else
3607 status = MGMT_STATUS_SUCCESS;
3608
3609 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3610 status, &cp->addr, sizeof(cp->addr));
3611 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3612 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3613 u8 *rand192, *hash192;
3614 u8 status;
3615
3616 if (cp->addr.type != BDADDR_BREDR) {
3617 err = cmd_complete(sk, hdev->id,
3618 MGMT_OP_ADD_REMOTE_OOB_DATA,
3619 MGMT_STATUS_INVALID_PARAMS,
3620 &cp->addr, sizeof(cp->addr));
3621 goto unlock;
3622 }
3623
3624 if (bdaddr_type_is_le(cp->addr.type)) {
3625 rand192 = NULL;
3626 hash192 = NULL;
3627 } else {
3628 rand192 = cp->rand192;
3629 hash192 = cp->hash192;
3630 }
3631
3632 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3633 cp->addr.type, hash192, rand192,
3634 cp->hash256, cp->rand256);
3635 if (err < 0)
3636 status = MGMT_STATUS_FAILED;
3637 else
3638 status = MGMT_STATUS_SUCCESS;
3639
3640 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3641 status, &cp->addr, sizeof(cp->addr));
3642 } else {
3643 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3644 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3645 MGMT_STATUS_INVALID_PARAMS);
3646 }
3647
3648 unlock:
3649 hci_dev_unlock(hdev);
3650 return err;
3651 }
3652
3653 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3654 void *data, u16 len)
3655 {
3656 struct mgmt_cp_remove_remote_oob_data *cp = data;
3657 u8 status;
3658 int err;
3659
3660 BT_DBG("%s", hdev->name);
3661
3662 if (cp->addr.type != BDADDR_BREDR)
3663 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3664 MGMT_STATUS_INVALID_PARAMS,
3665 &cp->addr, sizeof(cp->addr));
3666
3667 hci_dev_lock(hdev);
3668
3669 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3670 hci_remote_oob_data_clear(hdev);
3671 status = MGMT_STATUS_SUCCESS;
3672 goto done;
3673 }
3674
3675 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3676 if (err < 0)
3677 status = MGMT_STATUS_INVALID_PARAMS;
3678 else
3679 status = MGMT_STATUS_SUCCESS;
3680
3681 done:
3682 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3683 status, &cp->addr, sizeof(cp->addr));
3684
3685 hci_dev_unlock(hdev);
3686 return err;
3687 }
3688
3689 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3690 {
3691 struct pending_cmd *cmd;
3692 u8 type;
3693 int err;
3694
3695 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3696
3697 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3698 if (!cmd)
3699 return -ENOENT;
3700
3701 type = hdev->discovery.type;
3702
3703 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3704 &type, sizeof(type));
3705 mgmt_pending_remove(cmd);
3706
3707 return err;
3708 }
3709
3710 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3711 {
3712 unsigned long timeout = 0;
3713
3714 BT_DBG("status %d", status);
3715
3716 if (status) {
3717 hci_dev_lock(hdev);
3718 mgmt_start_discovery_failed(hdev, status);
3719 hci_dev_unlock(hdev);
3720 return;
3721 }
3722
3723 hci_dev_lock(hdev);
3724 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3725 hci_dev_unlock(hdev);
3726
3727 switch (hdev->discovery.type) {
3728 case DISCOV_TYPE_LE:
3729 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3730 break;
3731
3732 case DISCOV_TYPE_INTERLEAVED:
3733 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3734 break;
3735
3736 case DISCOV_TYPE_BREDR:
3737 break;
3738
3739 default:
3740 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3741 }
3742
3743 if (!timeout)
3744 return;
3745
3746 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
3747 }
3748
/* Start Discovery (MGMT_OP_START_DISCOVERY).
 *
 * Validates the requested discovery type against the current
 * controller state and then builds a single HCI request performing
 * either a BR/EDR inquiry, an LE active scan, or the LE part of an
 * interleaved discovery. Completion is reported asynchronously via
 * start_discovery_complete() through the pending command added here.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Reject when the controller is down ... */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_NOT_POWERED,
				   &cp->type, sizeof(cp->type));
		goto failed;
	}

	/* ... when periodic inquiry is active ... */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	/* ... or when a discovery is already in progress. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_BUSY, &cp->type,
				   sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY, status,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Only one inquiry may run on the controller at a time */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_BUSY, &cp->type,
					   sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Drop stale cache entries before starting fresh */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY, status,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_NOT_SUPPORTED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_START_DISCOVERY,
						   MGMT_STATUS_REJECTED,
						   &cp->type,
						   sizeof(cp->type));
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_START_DISCOVERY,
					   MGMT_STATUS_FAILED,
					   &cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS,
				   &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3921
3922 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3923 {
3924 struct pending_cmd *cmd;
3925 int err;
3926
3927 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3928 if (!cmd)
3929 return -ENOENT;
3930
3931 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3932 &hdev->discovery.type, sizeof(hdev->discovery.type));
3933 mgmt_pending_remove(cmd);
3934
3935 return err;
3936 }
3937
3938 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3939 {
3940 BT_DBG("status %d", status);
3941
3942 hci_dev_lock(hdev);
3943
3944 if (status) {
3945 mgmt_stop_discovery_failed(hdev, status);
3946 goto unlock;
3947 }
3948
3949 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3950
3951 unlock:
3952 hci_dev_unlock(hdev);
3953 }
3954
/* Stop Discovery (MGMT_OP_STOP_DISCOVERY).
 *
 * Aborts an active discovery of the matching type. The actual stop is
 * driven by an HCI request; when the request needs no HCI traffic at
 * all (-ENODATA) the state is updated and the command completed
 * synchronously.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	/* Request did not run; drop the pending command again */
	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4010
4011 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4012 u16 len)
4013 {
4014 struct mgmt_cp_confirm_name *cp = data;
4015 struct inquiry_entry *e;
4016 int err;
4017
4018 BT_DBG("%s", hdev->name);
4019
4020 hci_dev_lock(hdev);
4021
4022 if (!hci_discovery_active(hdev)) {
4023 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4024 MGMT_STATUS_FAILED, &cp->addr,
4025 sizeof(cp->addr));
4026 goto failed;
4027 }
4028
4029 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4030 if (!e) {
4031 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4032 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4033 sizeof(cp->addr));
4034 goto failed;
4035 }
4036
4037 if (cp->name_known) {
4038 e->name_state = NAME_KNOWN;
4039 list_del(&e->list);
4040 } else {
4041 e->name_state = NAME_NEEDED;
4042 hci_inquiry_cache_update_resolve(hdev, e);
4043 }
4044
4045 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4046 sizeof(cp->addr));
4047
4048 failed:
4049 hci_dev_unlock(hdev);
4050 return err;
4051 }
4052
4053 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4054 u16 len)
4055 {
4056 struct mgmt_cp_block_device *cp = data;
4057 u8 status;
4058 int err;
4059
4060 BT_DBG("%s", hdev->name);
4061
4062 if (!bdaddr_type_is_valid(cp->addr.type))
4063 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4064 MGMT_STATUS_INVALID_PARAMS,
4065 &cp->addr, sizeof(cp->addr));
4066
4067 hci_dev_lock(hdev);
4068
4069 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4070 cp->addr.type);
4071 if (err < 0) {
4072 status = MGMT_STATUS_FAILED;
4073 goto done;
4074 }
4075
4076 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4077 sk);
4078 status = MGMT_STATUS_SUCCESS;
4079
4080 done:
4081 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4082 &cp->addr, sizeof(cp->addr));
4083
4084 hci_dev_unlock(hdev);
4085
4086 return err;
4087 }
4088
4089 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4090 u16 len)
4091 {
4092 struct mgmt_cp_unblock_device *cp = data;
4093 u8 status;
4094 int err;
4095
4096 BT_DBG("%s", hdev->name);
4097
4098 if (!bdaddr_type_is_valid(cp->addr.type))
4099 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4100 MGMT_STATUS_INVALID_PARAMS,
4101 &cp->addr, sizeof(cp->addr));
4102
4103 hci_dev_lock(hdev);
4104
4105 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4106 cp->addr.type);
4107 if (err < 0) {
4108 status = MGMT_STATUS_INVALID_PARAMS;
4109 goto done;
4110 }
4111
4112 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4113 sk);
4114 status = MGMT_STATUS_SUCCESS;
4115
4116 done:
4117 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4118 &cp->addr, sizeof(cp->addr));
4119
4120 hci_dev_unlock(hdev);
4121
4122 return err;
4123 }
4124
4125 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4126 u16 len)
4127 {
4128 struct mgmt_cp_set_device_id *cp = data;
4129 struct hci_request req;
4130 int err;
4131 __u16 source;
4132
4133 BT_DBG("%s", hdev->name);
4134
4135 source = __le16_to_cpu(cp->source);
4136
4137 if (source > 0x0002)
4138 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4139 MGMT_STATUS_INVALID_PARAMS);
4140
4141 hci_dev_lock(hdev);
4142
4143 hdev->devid_source = source;
4144 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4145 hdev->devid_product = __le16_to_cpu(cp->product);
4146 hdev->devid_version = __le16_to_cpu(cp->version);
4147
4148 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4149
4150 hci_req_init(&req, hdev);
4151 update_eir(&req);
4152 hci_req_run(&req, NULL);
4153
4154 hci_dev_unlock(hdev);
4155
4156 return err;
4157 }
4158
4159 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4160 {
4161 struct cmd_lookup match = { NULL, hdev };
4162
4163 if (status) {
4164 u8 mgmt_err = mgmt_status(status);
4165
4166 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4167 cmd_status_rsp, &mgmt_err);
4168 return;
4169 }
4170
4171 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4172 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4173 else
4174 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4175
4176 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4177 &match);
4178
4179 new_settings(hdev, match.sk);
4180
4181 if (match.sk)
4182 sock_put(match.sk);
4183 }
4184
/* Set Advertising (MGMT_OP_SET_ADVERTISING).
 *
 * Turns LE advertising on or off. When no HCI traffic is needed or
 * possible (device off, no actual change, an LE connection exists, or
 * an active LE scan is running) the flag is toggled directly and the
 * settings response sent right away; otherwise the change is driven
 * through an HCI request and confirmed in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising or LE state change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4264
4265 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4266 void *data, u16 len)
4267 {
4268 struct mgmt_cp_set_static_address *cp = data;
4269 int err;
4270
4271 BT_DBG("%s", hdev->name);
4272
4273 if (!lmp_le_capable(hdev))
4274 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4275 MGMT_STATUS_NOT_SUPPORTED);
4276
4277 if (hdev_is_powered(hdev))
4278 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4279 MGMT_STATUS_REJECTED);
4280
4281 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4282 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4283 return cmd_status(sk, hdev->id,
4284 MGMT_OP_SET_STATIC_ADDRESS,
4285 MGMT_STATUS_INVALID_PARAMS);
4286
4287 /* Two most significant bits shall be set */
4288 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4289 return cmd_status(sk, hdev->id,
4290 MGMT_OP_SET_STATIC_ADDRESS,
4291 MGMT_STATUS_INVALID_PARAMS);
4292 }
4293
4294 hci_dev_lock(hdev);
4295
4296 bacpy(&hdev->static_addr, &cp->bdaddr);
4297
4298 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4299
4300 hci_dev_unlock(hdev);
4301
4302 return err;
4303 }
4304
4305 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4306 void *data, u16 len)
4307 {
4308 struct mgmt_cp_set_scan_params *cp = data;
4309 __u16 interval, window;
4310 int err;
4311
4312 BT_DBG("%s", hdev->name);
4313
4314 if (!lmp_le_capable(hdev))
4315 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4316 MGMT_STATUS_NOT_SUPPORTED);
4317
4318 interval = __le16_to_cpu(cp->interval);
4319
4320 if (interval < 0x0004 || interval > 0x4000)
4321 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4322 MGMT_STATUS_INVALID_PARAMS);
4323
4324 window = __le16_to_cpu(cp->window);
4325
4326 if (window < 0x0004 || window > 0x4000)
4327 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4328 MGMT_STATUS_INVALID_PARAMS);
4329
4330 if (window > interval)
4331 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4332 MGMT_STATUS_INVALID_PARAMS);
4333
4334 hci_dev_lock(hdev);
4335
4336 hdev->le_scan_interval = interval;
4337 hdev->le_scan_window = window;
4338
4339 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4340
4341 /* If background scan is running, restart it so new parameters are
4342 * loaded.
4343 */
4344 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4345 hdev->discovery.state == DISCOVERY_STOPPED) {
4346 struct hci_request req;
4347
4348 hci_req_init(&req, hdev);
4349
4350 hci_req_add_le_scan_disable(&req);
4351 hci_req_add_le_passive_scan(&req);
4352
4353 hci_req_run(&req, NULL);
4354 }
4355
4356 hci_dev_unlock(hdev);
4357
4358 return err;
4359 }
4360
4361 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4362 {
4363 struct pending_cmd *cmd;
4364
4365 BT_DBG("status 0x%02x", status);
4366
4367 hci_dev_lock(hdev);
4368
4369 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4370 if (!cmd)
4371 goto unlock;
4372
4373 if (status) {
4374 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4375 mgmt_status(status));
4376 } else {
4377 struct mgmt_mode *cp = cmd->param;
4378
4379 if (cp->val)
4380 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4381 else
4382 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4383
4384 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4385 new_settings(hdev, cmd->sk);
4386 }
4387
4388 mgmt_pending_remove(cmd);
4389
4390 unlock:
4391 hci_dev_unlock(hdev);
4392 }
4393
4394 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4395 void *data, u16 len)
4396 {
4397 struct mgmt_mode *cp = data;
4398 struct pending_cmd *cmd;
4399 struct hci_request req;
4400 int err;
4401
4402 BT_DBG("%s", hdev->name);
4403
4404 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4405 hdev->hci_ver < BLUETOOTH_VER_1_2)
4406 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4407 MGMT_STATUS_NOT_SUPPORTED);
4408
4409 if (cp->val != 0x00 && cp->val != 0x01)
4410 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4411 MGMT_STATUS_INVALID_PARAMS);
4412
4413 if (!hdev_is_powered(hdev))
4414 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4415 MGMT_STATUS_NOT_POWERED);
4416
4417 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4418 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4419 MGMT_STATUS_REJECTED);
4420
4421 hci_dev_lock(hdev);
4422
4423 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4424 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4425 MGMT_STATUS_BUSY);
4426 goto unlock;
4427 }
4428
4429 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4430 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4431 hdev);
4432 goto unlock;
4433 }
4434
4435 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4436 data, len);
4437 if (!cmd) {
4438 err = -ENOMEM;
4439 goto unlock;
4440 }
4441
4442 hci_req_init(&req, hdev);
4443
4444 write_fast_connectable(&req, cp->val);
4445
4446 err = hci_req_run(&req, fast_connectable_complete);
4447 if (err < 0) {
4448 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4449 MGMT_STATUS_FAILED);
4450 mgmt_pending_remove(cmd);
4451 }
4452
4453 unlock:
4454 hci_dev_unlock(hdev);
4455
4456 return err;
4457 }
4458
4459 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4460 {
4461 struct pending_cmd *cmd;
4462
4463 BT_DBG("status 0x%02x", status);
4464
4465 hci_dev_lock(hdev);
4466
4467 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4468 if (!cmd)
4469 goto unlock;
4470
4471 if (status) {
4472 u8 mgmt_err = mgmt_status(status);
4473
4474 /* We need to restore the flag if related HCI commands
4475 * failed.
4476 */
4477 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4478
4479 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4480 } else {
4481 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4482 new_settings(hdev, cmd->sk);
4483 }
4484
4485 mgmt_pending_remove(cmd);
4486
4487 unlock:
4488 hci_dev_unlock(hdev);
4489 }
4490
/* Set BR/EDR (MGMT_OP_SET_BREDR).
 *
 * Enables or disables BR/EDR on a dual-mode controller. While powered
 * off the flag (and dependent settings) are toggled directly; while
 * powered on only enabling is allowed, driven through an HCI request
 * and confirmed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR requires LE to stay enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: reply with the current settings */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* Disabling BR/EDR also clears settings that
			 * only apply to BR/EDR operation.
			 */
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	hci_update_page_scan(hdev, &req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4580
/* Set Secure Connections (MGMT_OP_SET_SECURE_CONN).
 *
 * val: 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode. When the
 * change cannot (or need not) be communicated to the controller the
 * host-side flags are toggled directly; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is issued and the result handled through
 * the pending command.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !lmp_sc_capable(hdev) && !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off, no controller SC support (and no forced-SC debug
	 * flag), or BR/EDR disabled: toggle the flags directly and
	 * reply without sending any HCI command.
	 */
	if (!hdev_is_powered(hdev) ||
	    (!lmp_sc_capable(hdev) &&
	     !test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) ||
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No effective change in either flag: reply with settings */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The SC-only flag is host-side state and set immediately; the
	 * HCI command above only conveys the enable value.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4666
4667 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4668 void *data, u16 len)
4669 {
4670 struct mgmt_mode *cp = data;
4671 bool changed, use_changed;
4672 int err;
4673
4674 BT_DBG("request for %s", hdev->name);
4675
4676 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4677 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4678 MGMT_STATUS_INVALID_PARAMS);
4679
4680 hci_dev_lock(hdev);
4681
4682 if (cp->val)
4683 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4684 &hdev->dev_flags);
4685 else
4686 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4687 &hdev->dev_flags);
4688
4689 if (cp->val == 0x02)
4690 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4691 &hdev->dev_flags);
4692 else
4693 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4694 &hdev->dev_flags);
4695
4696 if (hdev_is_powered(hdev) && use_changed &&
4697 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4698 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4699 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4700 sizeof(mode), &mode);
4701 }
4702
4703 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4704 if (err < 0)
4705 goto unlock;
4706
4707 if (changed)
4708 err = new_settings(hdev, sk);
4709
4710 unlock:
4711 hci_dev_unlock(hdev);
4712 return err;
4713 }
4714
4715 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4716 u16 len)
4717 {
4718 struct mgmt_cp_set_privacy *cp = cp_data;
4719 bool changed;
4720 int err;
4721
4722 BT_DBG("request for %s", hdev->name);
4723
4724 if (!lmp_le_capable(hdev))
4725 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4726 MGMT_STATUS_NOT_SUPPORTED);
4727
4728 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4729 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4730 MGMT_STATUS_INVALID_PARAMS);
4731
4732 if (hdev_is_powered(hdev))
4733 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4734 MGMT_STATUS_REJECTED);
4735
4736 hci_dev_lock(hdev);
4737
4738 /* If user space supports this command it is also expected to
4739 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4740 */
4741 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4742
4743 if (cp->privacy) {
4744 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4745 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4746 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4747 } else {
4748 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4749 memset(hdev->irk, 0, sizeof(hdev->irk));
4750 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4751 }
4752
4753 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4754 if (err < 0)
4755 goto unlock;
4756
4757 if (changed)
4758 err = new_settings(hdev, sk);
4759
4760 unlock:
4761 hci_dev_unlock(hdev);
4762 return err;
4763 }
4764
4765 static bool irk_is_valid(struct mgmt_irk_info *irk)
4766 {
4767 switch (irk->addr.type) {
4768 case BDADDR_LE_PUBLIC:
4769 return true;
4770
4771 case BDADDR_LE_RANDOM:
4772 /* Two most significant bits shall be set */
4773 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4774 return false;
4775 return true;
4776 }
4777
4778 return false;
4779 }
4780
4781 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4782 u16 len)
4783 {
4784 struct mgmt_cp_load_irks *cp = cp_data;
4785 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4786 sizeof(struct mgmt_irk_info));
4787 u16 irk_count, expected_len;
4788 int i, err;
4789
4790 BT_DBG("request for %s", hdev->name);
4791
4792 if (!lmp_le_capable(hdev))
4793 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4794 MGMT_STATUS_NOT_SUPPORTED);
4795
4796 irk_count = __le16_to_cpu(cp->irk_count);
4797 if (irk_count > max_irk_count) {
4798 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4799 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4800 MGMT_STATUS_INVALID_PARAMS);
4801 }
4802
4803 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4804 if (expected_len != len) {
4805 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4806 expected_len, len);
4807 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4808 MGMT_STATUS_INVALID_PARAMS);
4809 }
4810
4811 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4812
4813 for (i = 0; i < irk_count; i++) {
4814 struct mgmt_irk_info *key = &cp->irks[i];
4815
4816 if (!irk_is_valid(key))
4817 return cmd_status(sk, hdev->id,
4818 MGMT_OP_LOAD_IRKS,
4819 MGMT_STATUS_INVALID_PARAMS);
4820 }
4821
4822 hci_dev_lock(hdev);
4823
4824 hci_smp_irks_clear(hdev);
4825
4826 for (i = 0; i < irk_count; i++) {
4827 struct mgmt_irk_info *irk = &cp->irks[i];
4828 u8 addr_type;
4829
4830 if (irk->addr.type == BDADDR_LE_PUBLIC)
4831 addr_type = ADDR_LE_DEV_PUBLIC;
4832 else
4833 addr_type = ADDR_LE_DEV_RANDOM;
4834
4835 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4836 BDADDR_ANY);
4837 }
4838
4839 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4840
4841 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4842
4843 hci_dev_unlock(hdev);
4844
4845 return err;
4846 }
4847
4848 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4849 {
4850 if (key->master != 0x00 && key->master != 0x01)
4851 return false;
4852
4853 switch (key->addr.type) {
4854 case BDADDR_LE_PUBLIC:
4855 return true;
4856
4857 case BDADDR_LE_RANDOM:
4858 /* Two most significant bits shall be set */
4859 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4860 return false;
4861 return true;
4862 }
4863
4864 return false;
4865 }
4866
/* Handle the Load Long Term Keys mgmt command.
 *
 * Validates the supplied key list, then replaces the kernel's entire
 * LTK store with it. Entries with an unrecognized key type are
 * silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest entry count that can possibly fit in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	/* LTKs are an LE-only concept */
	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload size */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry up front so the command is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Replace the whole LTK store with the supplied list */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
		/* NOTE(review): no break above — MGMT_LTK_P256_DEBUG falls
		 * through to the default and the key is skipped, i.e. debug
		 * LTKs are never stored. Presumably intentional (debug keys
		 * should not be persisted) — confirm; otherwise a break is
		 * missing here.
		 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4958
/* Context passed to get_conn_info_complete() via mgmt_pending_foreach():
 * identifies which connection's pending GET_CONN_INFO command(s) should
 * be answered and with what status.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the refresh was run for */
	bool valid_tx_power;	/* TX power fields in hci_conn are usable */
	u8 mgmt_status;		/* status code for the mgmt reply */
};
4964
/* mgmt_pending_foreach() iterator invoked from conn_info_refresh_complete().
 *
 * Sends the Get Connection Information reply for any pending command
 * that refers to the matched connection, then releases the command and
 * the connection references taken in get_conn_info().
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer command(s) pending for this exact connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	/* Echo the address from the original request back in the reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power values are reported only if the refresh request
		 * actually obtained them; otherwise mark them invalid.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the references taken by get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	mgmt_pending_remove(cmd);
}
5001
/* HCI request callback for get_conn_info(): runs once the Read RSSI
 * (and optional Read Transmit Power Level) commands issued to refresh
 * the cached connection information have completed. Locates the
 * affected connection and answers all pending GET_CONN_INFO commands
 * for it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
5059
/* Handle the Get Connection Information mgmt command.
 *
 * Returns RSSI and TX power data for an existing connection. If the
 * cached values are fresh enough they are returned immediately;
 * otherwise an HCI request is issued and the reply is deferred to
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply, even errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the link type matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		/* RSSI is always (re-)read */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		/* Defer the reply to conn_info_refresh_complete() */
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the refresh completes;
		 * get_conn_info_complete() drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5171
/* HCI request callback for get_clock_info(): builds and sends the
 * Get Clock Information reply once the Read Clock command(s) have
 * completed, and releases the connection references taken when the
 * request was submitted.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the last Read Clock targeted the piconet
	 * clock of a specific connection; look that connection up so the
	 * matching pending command can be found via its user_data.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	/* Echo the address from the original request back in the reply */
	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure send the reply with zeroed clock values */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the references taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
5226
5227 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5228 u16 len)
5229 {
5230 struct mgmt_cp_get_clock_info *cp = data;
5231 struct mgmt_rp_get_clock_info rp;
5232 struct hci_cp_read_clock hci_cp;
5233 struct pending_cmd *cmd;
5234 struct hci_request req;
5235 struct hci_conn *conn;
5236 int err;
5237
5238 BT_DBG("%s", hdev->name);
5239
5240 memset(&rp, 0, sizeof(rp));
5241 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5242 rp.addr.type = cp->addr.type;
5243
5244 if (cp->addr.type != BDADDR_BREDR)
5245 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5246 MGMT_STATUS_INVALID_PARAMS,
5247 &rp, sizeof(rp));
5248
5249 hci_dev_lock(hdev);
5250
5251 if (!hdev_is_powered(hdev)) {
5252 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5253 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5254 goto unlock;
5255 }
5256
5257 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5258 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5259 &cp->addr.bdaddr);
5260 if (!conn || conn->state != BT_CONNECTED) {
5261 err = cmd_complete(sk, hdev->id,
5262 MGMT_OP_GET_CLOCK_INFO,
5263 MGMT_STATUS_NOT_CONNECTED,
5264 &rp, sizeof(rp));
5265 goto unlock;
5266 }
5267 } else {
5268 conn = NULL;
5269 }
5270
5271 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5272 if (!cmd) {
5273 err = -ENOMEM;
5274 goto unlock;
5275 }
5276
5277 hci_req_init(&req, hdev);
5278
5279 memset(&hci_cp, 0, sizeof(hci_cp));
5280 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5281
5282 if (conn) {
5283 hci_conn_hold(conn);
5284 cmd->user_data = hci_conn_get(conn);
5285
5286 hci_cp.handle = cpu_to_le16(conn->handle);
5287 hci_cp.which = 0x01; /* Piconet clock */
5288 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5289 }
5290
5291 err = hci_req_run(&req, get_clock_info_complete);
5292 if (err < 0)
5293 mgmt_pending_remove(cmd);
5294
5295 unlock:
5296 hci_dev_unlock(hdev);
5297 return err;
5298 }
5299
5300 static void device_added(struct sock *sk, struct hci_dev *hdev,
5301 bdaddr_t *bdaddr, u8 type, u8 action)
5302 {
5303 struct mgmt_ev_device_added ev;
5304
5305 bacpy(&ev.addr.bdaddr, bdaddr);
5306 ev.addr.type = type;
5307 ev.action = action;
5308
5309 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5310 }
5311
5312 static int add_device(struct sock *sk, struct hci_dev *hdev,
5313 void *data, u16 len)
5314 {
5315 struct mgmt_cp_add_device *cp = data;
5316 u8 auto_conn, addr_type;
5317 int err;
5318
5319 BT_DBG("%s", hdev->name);
5320
5321 if (!bdaddr_type_is_valid(cp->addr.type) ||
5322 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5323 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5324 MGMT_STATUS_INVALID_PARAMS,
5325 &cp->addr, sizeof(cp->addr));
5326
5327 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5328 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5329 MGMT_STATUS_INVALID_PARAMS,
5330 &cp->addr, sizeof(cp->addr));
5331
5332 hci_dev_lock(hdev);
5333
5334 if (cp->addr.type == BDADDR_BREDR) {
5335 /* Only incoming connections action is supported for now */
5336 if (cp->action != 0x01) {
5337 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5338 MGMT_STATUS_INVALID_PARAMS,
5339 &cp->addr, sizeof(cp->addr));
5340 goto unlock;
5341 }
5342
5343 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5344 cp->addr.type);
5345 if (err)
5346 goto unlock;
5347
5348 hci_update_page_scan(hdev, NULL);
5349
5350 goto added;
5351 }
5352
5353 if (cp->addr.type == BDADDR_LE_PUBLIC)
5354 addr_type = ADDR_LE_DEV_PUBLIC;
5355 else
5356 addr_type = ADDR_LE_DEV_RANDOM;
5357
5358 if (cp->action == 0x02)
5359 auto_conn = HCI_AUTO_CONN_ALWAYS;
5360 else if (cp->action == 0x01)
5361 auto_conn = HCI_AUTO_CONN_DIRECT;
5362 else
5363 auto_conn = HCI_AUTO_CONN_REPORT;
5364
5365 /* If the connection parameters don't exist for this device,
5366 * they will be created and configured with defaults.
5367 */
5368 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5369 auto_conn) < 0) {
5370 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5371 MGMT_STATUS_FAILED,
5372 &cp->addr, sizeof(cp->addr));
5373 goto unlock;
5374 }
5375
5376 added:
5377 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5378
5379 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5380 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5381
5382 unlock:
5383 hci_dev_unlock(hdev);
5384 return err;
5385 }
5386
5387 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5388 bdaddr_t *bdaddr, u8 type)
5389 {
5390 struct mgmt_ev_device_removed ev;
5391
5392 bacpy(&ev.addr.bdaddr, bdaddr);
5393 ev.addr.type = type;
5394
5395 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5396 }
5397
/* Handle the Remove Device mgmt command.
 *
 * With a specific address: remove the device from the BR/EDR whitelist
 * or delete its LE connection parameters. With BDADDR_ANY: clear the
 * whole whitelist and all user-added LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			hci_update_page_scan(hdev, NULL);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries are kernel-internal and cannot be
		 * removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		/* Re-evaluate passive scanning now the entry is gone */
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: wipe everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_page_scan(hdev, NULL);

		/* Disabled entries stay; they were not added by user space */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5506
/* Handle the Load Connection Parameters mgmt command.
 *
 * Replaces all disabled (user-space managed) LE connection parameter
 * entries with the supplied list. Individual invalid entries are
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest entry count that can possibly fit in a u16-sized payload */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	/* Connection parameters are an LE-only concept */
	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared entry count must match the actual payload size */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop previously loaded (disabled) entries before loading new ones */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range/consistency check per the LE connection spec */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5591
/* Handle the Set External Configuration mgmt command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * external-configuration quirk, and moves the controller between the
 * configured and unconfigured index lists when the change affects
 * whether it counts as fully configured.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration may only change while the controller is off */
	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The HCI_UNCONFIGURED flag no longer matches reality when it
	 * equals is_configured(): either the device just became fully
	 * configured while still flagged unconfigured, or vice versa.
	 * In that case re-register the index under its new identity.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		/* test_and_change_bit() returns the old flag value: true
		 * means we just cleared HCI_UNCONFIGURED (now configured),
		 * so run the power-on setup; false means we just set it,
		 * so re-add the index as an unconfigured controller.
		 */
		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5649
5650 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5651 void *data, u16 len)
5652 {
5653 struct mgmt_cp_set_public_address *cp = data;
5654 bool changed;
5655 int err;
5656
5657 BT_DBG("%s", hdev->name);
5658
5659 if (hdev_is_powered(hdev))
5660 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5661 MGMT_STATUS_REJECTED);
5662
5663 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5664 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5665 MGMT_STATUS_INVALID_PARAMS);
5666
5667 if (!hdev->set_bdaddr)
5668 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5669 MGMT_STATUS_NOT_SUPPORTED);
5670
5671 hci_dev_lock(hdev);
5672
5673 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5674 bacpy(&hdev->public_addr, &cp->bdaddr);
5675
5676 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5677 if (err < 0)
5678 goto unlock;
5679
5680 if (!changed)
5681 goto unlock;
5682
5683 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5684 err = new_options(hdev, sk);
5685
5686 if (is_configured(hdev)) {
5687 mgmt_index_removed(hdev);
5688
5689 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5690
5691 set_bit(HCI_CONFIG, &hdev->dev_flags);
5692 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5693
5694 queue_work(hdev->req_workqueue, &hdev->power_on);
5695 }
5696
5697 unlock:
5698 hci_dev_unlock(hdev);
5699 return err;
5700 }
5701
/* Command dispatch table, indexed directly by mgmt opcode in
 * mgmt_control() — the position of each entry must therefore match its
 * MGMT_OP_* value. For fixed-size commands (var_len == false) the
 * payload length must equal data_len exactly; for variable-length
 * commands data_len is the minimum accepted size.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5767
/* Entry point for all mgmt commands received on a control socket.
 *
 * Copies the message from user space, validates the header, the target
 * controller index and the per-command payload size, then dispatches
 * to the matching mgmt_handlers[] entry. Returns the number of bytes
 * consumed on success or a negative errno / the handler's error.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must account for the entire payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still being set up, being configured, or
		 * claimed by a user channel are not addressable via mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the configuration
		 * related commands.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry a controller index */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Conversely, per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Variable-length commands require at least data_len bytes,
	 * fixed-size commands exactly data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	/* Command parameters follow directly after the header */
	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5877
5878 void mgmt_index_added(struct hci_dev *hdev)
5879 {
5880 if (hdev->dev_type != HCI_BREDR)
5881 return;
5882
5883 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5884 return;
5885
5886 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5887 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5888 else
5889 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5890 }
5891
5892 void mgmt_index_removed(struct hci_dev *hdev)
5893 {
5894 u8 status = MGMT_STATUS_INVALID_INDEX;
5895
5896 if (hdev->dev_type != HCI_BREDR)
5897 return;
5898
5899 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5900 return;
5901
5902 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5903
5904 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5905 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5906 else
5907 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5908 }
5909
5910 /* This function requires the caller holds hdev->lock */
5911 static void restart_le_actions(struct hci_dev *hdev)
5912 {
5913 struct hci_conn_params *p;
5914
5915 list_for_each_entry(p, &hdev->le_conn_params, list) {
5916 /* Needed for AUTO_OFF case where might not "really"
5917 * have been powered off.
5918 */
5919 list_del_init(&p->action);
5920
5921 switch (p->auto_connect) {
5922 case HCI_AUTO_CONN_DIRECT:
5923 case HCI_AUTO_CONN_ALWAYS:
5924 list_add(&p->action, &hdev->pend_le_conns);
5925 break;
5926 case HCI_AUTO_CONN_REPORT:
5927 list_add(&p->action, &hdev->pend_le_reports);
5928 break;
5929 default:
5930 break;
5931 }
5932 }
5933
5934 hci_update_background_scan(hdev);
5935 }
5936
/* Completion callback for the HCI request built by powered_update_hci().
 * Re-arms the LE auto-connect actions, answers pending Set Powered
 * commands and broadcasts the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* restart_le_actions() requires hdev->lock to be held */
	restart_le_actions(hdev);

	/* settings_rsp presumably records one requester socket in match.sk
	 * (with a held reference) — it is used to skip that socket below
	 * and released at the end.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
5956
/* Build and submit the HCI command sequence that brings the controller
 * in sync with the current mgmt settings after power on.
 *
 * Returns the result of hci_req_run(): 0 when the request was queued
 * (powered_complete() runs later), negative otherwise — presumably
 * when no commands were added; confirm against hci_req_run().
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP in the controller if mgmt has it enabled but the
	 * host feature bit is not set yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	/* On dual-mode controllers announce LE host support (without
	 * simultaneous LE + BR/EDR).
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication-enable setting with the controller */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	/* BR/EDR-specific state: scan parameters, class, name and EIR */
	if (lmp_bredr_capable(hdev)) {
		write_fast_connectable(&req, false);
		hci_update_page_scan(hdev, &req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6016
/* Handle a controller power state change on behalf of mgmt.
 * @powered: non-zero when the controller just came up.
 *
 * Returns 0, or the result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	/* Nothing to do for controllers not managed through mgmt */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* When the init request was queued, powered_complete()
		 * answers the pending Set Powered commands later.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		/* Request didn't start; answer pending commands now */
		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powering down: complete Set Powered commands, then fail every
	 * remaining pending command with Not Powered (opcode 0 = all).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report an all-zero class of device while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	/* settings_rsp presumably held a reference on match.sk */
	if (match.sk)
		sock_put(match.sk);

	return err;
}
6051
6052 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6053 {
6054 struct pending_cmd *cmd;
6055 u8 status;
6056
6057 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6058 if (!cmd)
6059 return;
6060
6061 if (err == -ERFKILL)
6062 status = MGMT_STATUS_RFKILLED;
6063 else
6064 status = MGMT_STATUS_FAILED;
6065
6066 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6067
6068 mgmt_pending_remove(cmd);
6069 }
6070
/* Discoverable timeout handler: clear the discoverable flags, sync
 * the controller state and announce the setting change.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	/* Drop inquiry scan but keep page scan enabled (BR/EDR only) */
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* Class of device and advertising data are refreshed since they
	 * presumably reflect the discoverable state — confirm in
	 * update_class()/update_adv_data().
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6101
6102 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6103 bool persistent)
6104 {
6105 struct mgmt_ev_new_link_key ev;
6106
6107 memset(&ev, 0, sizeof(ev));
6108
6109 ev.store_hint = persistent;
6110 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6111 ev.key.addr.type = BDADDR_BREDR;
6112 ev.key.type = key->type;
6113 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6114 ev.key.pin_len = key->pin_len;
6115
6116 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6117 }
6118
6119 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6120 {
6121 switch (ltk->type) {
6122 case SMP_LTK:
6123 case SMP_LTK_SLAVE:
6124 if (ltk->authenticated)
6125 return MGMT_LTK_AUTHENTICATED;
6126 return MGMT_LTK_UNAUTHENTICATED;
6127 case SMP_LTK_P256:
6128 if (ltk->authenticated)
6129 return MGMT_LTK_P256_AUTH;
6130 return MGMT_LTK_P256_UNAUTH;
6131 case SMP_LTK_P256_DEBUG:
6132 return MGMT_LTK_P256_DEBUG;
6133 }
6134
6135 return MGMT_LTK_UNAUTHENTICATED;
6136 }
6137
6138 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6139 {
6140 struct mgmt_ev_new_long_term_key ev;
6141
6142 memset(&ev, 0, sizeof(ev));
6143
6144 /* Devices using resolvable or non-resolvable random addresses
6145 * without providing an indentity resolving key don't require
6146 * to store long term keys. Their addresses will change the
6147 * next time around.
6148 *
6149 * Only when a remote device provides an identity address
6150 * make sure the long term key is stored. If the remote
6151 * identity is known, the long term keys are internally
6152 * mapped to the identity address. So allow static random
6153 * and public addresses here.
6154 */
6155 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6156 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6157 ev.store_hint = 0x00;
6158 else
6159 ev.store_hint = persistent;
6160
6161 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6162 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6163 ev.key.type = mgmt_ltk_type(key);
6164 ev.key.enc_size = key->enc_size;
6165 ev.key.ediv = key->ediv;
6166 ev.key.rand = key->rand;
6167
6168 if (key->type == SMP_LTK)
6169 ev.key.master = 1;
6170
6171 memcpy(ev.key.val, key->val, sizeof(key->val));
6172
6173 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6174 }
6175
6176 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6177 {
6178 struct mgmt_ev_new_irk ev;
6179
6180 memset(&ev, 0, sizeof(ev));
6181
6182 /* For identity resolving keys from devices that are already
6183 * using a public address or static random address, do not
6184 * ask for storing this key. The identity resolving key really
6185 * is only mandatory for devices using resovlable random
6186 * addresses.
6187 *
6188 * Storing all identity resolving keys has the downside that
6189 * they will be also loaded on next boot of they system. More
6190 * identity resolving keys, means more time during scanning is
6191 * needed to actually resolve these addresses.
6192 */
6193 if (bacmp(&irk->rpa, BDADDR_ANY))
6194 ev.store_hint = 0x01;
6195 else
6196 ev.store_hint = 0x00;
6197
6198 bacpy(&ev.rpa, &irk->rpa);
6199 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6200 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6201 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6202
6203 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6204 }
6205
6206 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6207 bool persistent)
6208 {
6209 struct mgmt_ev_new_csrk ev;
6210
6211 memset(&ev, 0, sizeof(ev));
6212
6213 /* Devices using resolvable or non-resolvable random addresses
6214 * without providing an indentity resolving key don't require
6215 * to store signature resolving keys. Their addresses will change
6216 * the next time around.
6217 *
6218 * Only when a remote device provides an identity address
6219 * make sure the signature resolving key is stored. So allow
6220 * static random and public addresses here.
6221 */
6222 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6223 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6224 ev.store_hint = 0x00;
6225 else
6226 ev.store_hint = persistent;
6227
6228 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6229 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6230 ev.key.master = csrk->master;
6231 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6232
6233 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6234 }
6235
6236 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6237 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6238 u16 max_interval, u16 latency, u16 timeout)
6239 {
6240 struct mgmt_ev_new_conn_param ev;
6241
6242 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6243 return;
6244
6245 memset(&ev, 0, sizeof(ev));
6246 bacpy(&ev.addr.bdaddr, bdaddr);
6247 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6248 ev.store_hint = store_hint;
6249 ev.min_interval = cpu_to_le16(min_interval);
6250 ev.max_interval = cpu_to_le16(max_interval);
6251 ev.latency = cpu_to_le16(latency);
6252 ev.timeout = cpu_to_le16(timeout);
6253
6254 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6255 }
6256
6257 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6258 u8 data_len)
6259 {
6260 eir[eir_len++] = sizeof(type) + data_len;
6261 eir[eir_len++] = type;
6262 memcpy(&eir[eir_len], data, data_len);
6263 eir_len += data_len;
6264
6265 return eir_len;
6266 }
6267
/* Emit a Device Connected event. For LE connections the cached
 * advertising data is attached; for BR/EDR the remote name and class
 * of device are appended as EIR-formatted fields instead.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		/* assumes le_adv_data_len fits within ev->eir — bounded
		 * by the HCI advertising data limits; TODO confirm
		 */
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6304
6305 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6306 {
6307 struct mgmt_cp_disconnect *cp = cmd->param;
6308 struct sock **sk = data;
6309 struct mgmt_rp_disconnect rp;
6310
6311 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6312 rp.addr.type = cp->addr.type;
6313
6314 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6315 sizeof(rp));
6316
6317 *sk = cmd->sk;
6318 sock_hold(*sk);
6319
6320 mgmt_pending_remove(cmd);
6321 }
6322
6323 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6324 {
6325 struct hci_dev *hdev = data;
6326 struct mgmt_cp_unpair_device *cp = cmd->param;
6327 struct mgmt_rp_unpair_device rp;
6328
6329 memset(&rp, 0, sizeof(rp));
6330 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6331 rp.addr.type = cp->addr.type;
6332
6333 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6334
6335 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6336
6337 mgmt_pending_remove(cmd);
6338 }
6339
6340 bool mgmt_powering_down(struct hci_dev *hdev)
6341 {
6342 struct pending_cmd *cmd;
6343 struct mgmt_mode *cp;
6344
6345 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6346 if (!cmd)
6347 return false;
6348
6349 cp = cmd->param;
6350 if (!cp->val)
6351 return true;
6352
6353 return false;
6354 }
6355
/* Emit a Device Disconnected event and complete matching pending
 * Disconnect/Unpair commands.
 * @mgmt_connected: when false no event is emitted (but a pending
 * power-off may still be expedited).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported over mgmt */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requester socket (with a held
	 * reference) in sk so its event can be skipped below
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6391
6392 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6393 u8 link_type, u8 addr_type, u8 status)
6394 {
6395 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6396 struct mgmt_cp_disconnect *cp;
6397 struct mgmt_rp_disconnect rp;
6398 struct pending_cmd *cmd;
6399
6400 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6401 hdev);
6402
6403 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6404 if (!cmd)
6405 return;
6406
6407 cp = cmd->param;
6408
6409 if (bacmp(bdaddr, &cp->addr.bdaddr))
6410 return;
6411
6412 if (cp->addr.type != bdaddr_type)
6413 return;
6414
6415 bacpy(&rp.addr.bdaddr, bdaddr);
6416 rp.addr.type = bdaddr_type;
6417
6418 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6419 mgmt_status(status), &rp, sizeof(rp));
6420
6421 mgmt_pending_remove(cmd);
6422 }
6423
6424 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6425 u8 addr_type, u8 status)
6426 {
6427 struct mgmt_ev_connect_failed ev;
6428
6429 /* The connection is still in hci_conn_hash so test for 1
6430 * instead of 0 to know if this is the last one.
6431 */
6432 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
6433 cancel_delayed_work(&hdev->power_off);
6434 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6435 }
6436
6437 bacpy(&ev.addr.bdaddr, bdaddr);
6438 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6439 ev.status = mgmt_status(status);
6440
6441 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6442 }
6443
6444 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6445 {
6446 struct mgmt_ev_pin_code_request ev;
6447
6448 bacpy(&ev.addr.bdaddr, bdaddr);
6449 ev.addr.type = BDADDR_BREDR;
6450 ev.secure = secure;
6451
6452 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6453 }
6454
6455 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6456 u8 status)
6457 {
6458 struct pending_cmd *cmd;
6459 struct mgmt_rp_pin_code_reply rp;
6460
6461 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6462 if (!cmd)
6463 return;
6464
6465 bacpy(&rp.addr.bdaddr, bdaddr);
6466 rp.addr.type = BDADDR_BREDR;
6467
6468 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6469 mgmt_status(status), &rp, sizeof(rp));
6470
6471 mgmt_pending_remove(cmd);
6472 }
6473
6474 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6475 u8 status)
6476 {
6477 struct pending_cmd *cmd;
6478 struct mgmt_rp_pin_code_reply rp;
6479
6480 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6481 if (!cmd)
6482 return;
6483
6484 bacpy(&rp.addr.bdaddr, bdaddr);
6485 rp.addr.type = BDADDR_BREDR;
6486
6487 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6488 mgmt_status(status), &rp, sizeof(rp));
6489
6490 mgmt_pending_remove(cmd);
6491 }
6492
6493 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6494 u8 link_type, u8 addr_type, u32 value,
6495 u8 confirm_hint)
6496 {
6497 struct mgmt_ev_user_confirm_request ev;
6498
6499 BT_DBG("%s", hdev->name);
6500
6501 bacpy(&ev.addr.bdaddr, bdaddr);
6502 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6503 ev.confirm_hint = confirm_hint;
6504 ev.value = cpu_to_le32(value);
6505
6506 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6507 NULL);
6508 }
6509
6510 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6511 u8 link_type, u8 addr_type)
6512 {
6513 struct mgmt_ev_user_passkey_request ev;
6514
6515 BT_DBG("%s", hdev->name);
6516
6517 bacpy(&ev.addr.bdaddr, bdaddr);
6518 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6519
6520 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6521 NULL);
6522 }
6523
6524 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6525 u8 link_type, u8 addr_type, u8 status,
6526 u8 opcode)
6527 {
6528 struct pending_cmd *cmd;
6529 struct mgmt_rp_user_confirm_reply rp;
6530 int err;
6531
6532 cmd = mgmt_pending_find(opcode, hdev);
6533 if (!cmd)
6534 return -ENOENT;
6535
6536 bacpy(&rp.addr.bdaddr, bdaddr);
6537 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6538 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6539 &rp, sizeof(rp));
6540
6541 mgmt_pending_remove(cmd);
6542
6543 return err;
6544 }
6545
6546 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6547 u8 link_type, u8 addr_type, u8 status)
6548 {
6549 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6550 status, MGMT_OP_USER_CONFIRM_REPLY);
6551 }
6552
6553 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6554 u8 link_type, u8 addr_type, u8 status)
6555 {
6556 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6557 status,
6558 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6559 }
6560
6561 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6562 u8 link_type, u8 addr_type, u8 status)
6563 {
6564 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6565 status, MGMT_OP_USER_PASSKEY_REPLY);
6566 }
6567
6568 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6569 u8 link_type, u8 addr_type, u8 status)
6570 {
6571 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6572 status,
6573 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6574 }
6575
6576 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6577 u8 link_type, u8 addr_type, u32 passkey,
6578 u8 entered)
6579 {
6580 struct mgmt_ev_passkey_notify ev;
6581
6582 BT_DBG("%s", hdev->name);
6583
6584 bacpy(&ev.addr.bdaddr, bdaddr);
6585 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6586 ev.passkey = __cpu_to_le32(passkey);
6587 ev.entered = entered;
6588
6589 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6590 }
6591
6592 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
6593 {
6594 struct mgmt_ev_auth_failed ev;
6595 struct pending_cmd *cmd;
6596 u8 status = mgmt_status(hci_status);
6597
6598 bacpy(&ev.addr.bdaddr, &conn->dst);
6599 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
6600 ev.status = status;
6601
6602 cmd = find_pairing(conn);
6603
6604 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
6605 cmd ? cmd->sk : NULL);
6606
6607 if (cmd)
6608 pairing_complete(cmd, status);
6609 }
6610
/* Handle completion of the HCI Write Auth Enable command: mirror the
 * controller state into the HCI_LINK_SECURITY mgmt flag, answer
 * pending Set Link Security commands and announce any change.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Command failed: fail all pending Set Link Security
		 * commands with the translated status.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the mgmt flag with the controller's HCI_AUTH state and
	 * record whether this was an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp presumably held a reference on match.sk */
	if (match.sk)
		sock_put(match.sk);
}
6639
6640 static void clear_eir(struct hci_request *req)
6641 {
6642 struct hci_dev *hdev = req->hdev;
6643 struct hci_cp_write_eir cp;
6644
6645 if (!lmp_ext_inq_capable(hdev))
6646 return;
6647
6648 memset(hdev->eir, 0, sizeof(hdev->eir));
6649
6650 memset(&cp, 0, sizeof(cp));
6651
6652 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6653 }
6654
/* Handle completion of the HCI Write SSP Mode command: sync the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED mgmt flags, answer pending Set SSP
 * commands and refresh (or clear) the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flag if it was set.
		 * HS is cleared along with SSP.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also clears HS; report a change if
		 * either flag actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp presumably held a reference on match.sk */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		/* Keep the controller's SSP debug mode in sync */
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6707
/* Handle completion of the HCI command toggling Secure Connections
 * support: sync the HCI_SC_ENABLED/HCI_SC_ONLY mgmt flags, answer
 * pending Set Secure Connections commands and announce any change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags if they were set */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* SC-only mode cannot remain active without SC */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp presumably held a reference on match.sk */
	if (match.sk)
		sock_put(match.sk);
}
6744
6745 static void sk_lookup(struct pending_cmd *cmd, void *data)
6746 {
6747 struct cmd_lookup *match = data;
6748
6749 if (match->sk == NULL) {
6750 match->sk = cmd->sk;
6751 sock_hold(match->sk);
6752 }
6753 }
6754
6755 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6756 u8 status)
6757 {
6758 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6759
6760 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6761 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6762 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6763
6764 if (!status)
6765 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6766 NULL);
6767
6768 if (match.sk)
6769 sock_put(match.sk);
6770 }
6771
/* Handle completion of a local name write. Broadcasts a Local Name
 * Changed event, except when the write happened as part of powering
 * the controller on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	/* assumes name points at a buffer of at least
	 * HCI_MAX_NAME_LENGTH bytes — TODO confirm against callers
	 */
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending Set Local Name command, so cache the new
		 * name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6798
/* Deliver the controller's local OOB data to the pending Read Local
 * OOB Data command. When BR/EDR secure connections are enabled and
 * P-256 values are present, the extended reply carrying both the
 * P-192 and P-256 hash/randomizer pairs is used instead of the
 * legacy one.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			/* assumes hash192/rand192 are non-NULL on the
			 * success path — TODO confirm against callers
			 */
			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.rand192, rand192, sizeof(rp.rand192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.rand, rand192, sizeof(rp.rand));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6841
/* Emit a Device Found event for an inquiry result or LE advertising
 * report. eir and scan_rsp are forwarded verbatim; a class-of-device
 * field is synthesized when dev_class is given and the EIR data lacks
 * one.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append class of device unless the EIR data already has one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6889
6890 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6891 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6892 {
6893 struct mgmt_ev_device_found *ev;
6894 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6895 u16 eir_len;
6896
6897 ev = (struct mgmt_ev_device_found *) buf;
6898
6899 memset(buf, 0, sizeof(buf));
6900
6901 bacpy(&ev->addr.bdaddr, bdaddr);
6902 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6903 ev->rssi = rssi;
6904
6905 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6906 name_len);
6907
6908 ev->eir_len = cpu_to_le16(eir_len);
6909
6910 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6911 }
6912
6913 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6914 {
6915 struct mgmt_ev_discovering ev;
6916 struct pending_cmd *cmd;
6917
6918 BT_DBG("%s discovering %u", hdev->name, discovering);
6919
6920 if (discovering)
6921 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6922 else
6923 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6924
6925 if (cmd != NULL) {
6926 u8 type = hdev->discovery.type;
6927
6928 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6929 sizeof(type));
6930 mgmt_pending_remove(cmd);
6931 }
6932
6933 memset(&ev, 0, sizeof(ev));
6934 ev.type = hdev->discovery.type;
6935 ev.discovering = discovering;
6936
6937 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6938 }
6939
/* Completion callback for the advertising re-enable request; only
 * logs the status, no further action is taken.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6944
/* Re-enable advertising in the controller, but only if the mgmt
 * advertising setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}