]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/mgmt.c
37305facf4d6f9c310caad3f132dde6af874130d
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33
34 #include "smp.h"
35
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
38
/* Opcodes of the mgmt commands implemented here; reported to user
 * space in the Read Commands reply (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87
/* Opcodes of the mgmt events this implementation can emit; reported to
 * user space in the Read Commands reply (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
};
112
113 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
114
115 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
116 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
117
/* A mgmt command whose completion is asynchronous. Entries are kept on
 * hdev->mgmt_pending (see mgmt_pending_add()/mgmt_pending_remove()).
 */
struct pending_cmd {
	struct list_head list;
	u16 opcode;		/* mgmt opcode being processed */
	int index;		/* controller index the command targets */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* originating socket, holds a reference */
	void *user_data;	/* handler-private context */
};
126
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code. Codes beyond the end of the table fall back to
 * MGMT_STATUS_FAILED in mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
191
192 static u8 mgmt_status(u8 hci_status)
193 {
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
196
197 return MGMT_STATUS_FAILED;
198 }
199
200 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
201 {
202 struct sk_buff *skb;
203 struct mgmt_hdr *hdr;
204 struct mgmt_ev_cmd_status *ev;
205 int err;
206
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
208
209 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 if (!skb)
211 return -ENOMEM;
212
213 hdr = (void *) skb_put(skb, sizeof(*hdr));
214
215 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
216 hdr->index = cpu_to_le16(index);
217 hdr->len = cpu_to_le16(sizeof(*ev));
218
219 ev = (void *) skb_put(skb, sizeof(*ev));
220 ev->status = status;
221 ev->opcode = cpu_to_le16(cmd);
222
223 err = sock_queue_rcv_skb(sk, skb);
224 if (err < 0)
225 kfree_skb(skb);
226
227 return err;
228 }
229
230 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
231 void *rp, size_t rp_len)
232 {
233 struct sk_buff *skb;
234 struct mgmt_hdr *hdr;
235 struct mgmt_ev_cmd_complete *ev;
236 int err;
237
238 BT_DBG("sock %p", sk);
239
240 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 if (!skb)
242 return -ENOMEM;
243
244 hdr = (void *) skb_put(skb, sizeof(*hdr));
245
246 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
247 hdr->index = cpu_to_le16(index);
248 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
249
250 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
251 ev->opcode = cpu_to_le16(cmd);
252 ev->status = status;
253
254 if (rp)
255 memcpy(ev->data, rp, rp_len);
256
257 err = sock_queue_rcv_skb(sk, skb);
258 if (err < 0)
259 kfree_skb(skb);
260
261 return err;
262 }
263
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
265 u16 data_len)
266 {
267 struct mgmt_rp_read_version rp;
268
269 BT_DBG("sock %p", sk);
270
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
273
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
275 sizeof(rp));
276 }
277
278 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
279 u16 data_len)
280 {
281 struct mgmt_rp_read_commands *rp;
282 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
283 const u16 num_events = ARRAY_SIZE(mgmt_events);
284 __le16 *opcode;
285 size_t rp_size;
286 int i, err;
287
288 BT_DBG("sock %p", sk);
289
290 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
291
292 rp = kmalloc(rp_size, GFP_KERNEL);
293 if (!rp)
294 return -ENOMEM;
295
296 rp->num_commands = __constant_cpu_to_le16(num_commands);
297 rp->num_events = __constant_cpu_to_le16(num_events);
298
299 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
300 put_unaligned_le16(mgmt_commands[i], opcode);
301
302 for (i = 0; i < num_events; i++, opcode++)
303 put_unaligned_le16(mgmt_events[i], opcode);
304
305 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
306 rp_size);
307 kfree(rp);
308
309 return err;
310 }
311
/* Handler for the Read Controller Index List command: reply with the
 * identifiers of all registered BR/EDR controllers, skipping those
 * still in setup or bound to a user channel.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of indexes, used to
	 * size the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the device list read lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, now also skipping devices
	 * that should not be exposed, so count may shrink.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
364
365 static u32 get_supported_settings(struct hci_dev *hdev)
366 {
367 u32 settings = 0;
368
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
371 settings |= MGMT_SETTING_DEBUG_KEYS;
372
373 if (lmp_bredr_capable(hdev)) {
374 settings |= MGMT_SETTING_CONNECTABLE;
375 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
376 settings |= MGMT_SETTING_FAST_CONNECTABLE;
377 settings |= MGMT_SETTING_DISCOVERABLE;
378 settings |= MGMT_SETTING_BREDR;
379 settings |= MGMT_SETTING_LINK_SECURITY;
380
381 if (lmp_ssp_capable(hdev)) {
382 settings |= MGMT_SETTING_SSP;
383 settings |= MGMT_SETTING_HS;
384 }
385
386 if (lmp_sc_capable(hdev) ||
387 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
388 settings |= MGMT_SETTING_SECURE_CONN;
389 }
390
391 if (lmp_le_capable(hdev)) {
392 settings |= MGMT_SETTING_LE;
393 settings |= MGMT_SETTING_ADVERTISING;
394 settings |= MGMT_SETTING_PRIVACY;
395 }
396
397 return settings;
398 }
399
400 static u32 get_current_settings(struct hci_dev *hdev)
401 {
402 u32 settings = 0;
403
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
406
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
409
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
412
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
415
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
418
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
421
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
424
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
427
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
430
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
433
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
436
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
439
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
442
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
445
446 return settings;
447 }
448
449 #define PNP_INFO_SVCLASS_ID 0x1200
450
/* Append a 16-bit Service UUID EIR field for all registered 16-bit
 * UUIDs to the buffer at @data with @len bytes of room, returning the
 * advanced write pointer. The field type is downgraded from "all" to
 * "some" when the buffer cannot hold every UUID.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two header bytes plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit UUID is stored in bytes 12-13 of the
		 * 128-bit representation.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header: length byte (updated
		 * as UUIDs are appended) and EIR type.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
492
/* Append a 32-bit Service UUID EIR field for all registered 32-bit
 * UUIDs to the buffer at @data with @len bytes of room, returning the
 * advanced write pointer. The field type is downgraded from "all" to
 * "some" when the buffer cannot hold every UUID.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two header bytes plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header: length byte (updated
		 * as UUIDs are appended) and EIR type.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit UUID starts at byte 12 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
525
/* Append a 128-bit Service UUID EIR field for all registered 128-bit
 * UUIDs to the buffer at @data with @len bytes of room, returning the
 * advanced write pointer. The field type is downgraded from "all" to
 * "some" when the buffer cannot hold every UUID.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need at least the two header bytes plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header: length byte (updated
		 * as UUIDs are appended) and EIR type.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
558
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
560 {
561 struct pending_cmd *cmd;
562
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
565 return cmd;
566 }
567
568 return NULL;
569 }
570
571 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
572 {
573 u8 ad_len = 0;
574 size_t name_len;
575
576 name_len = strlen(hdev->dev_name);
577 if (name_len > 0) {
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
579
580 if (name_len > max_len) {
581 name_len = max_len;
582 ptr[1] = EIR_NAME_SHORT;
583 } else
584 ptr[1] = EIR_NAME_COMPLETE;
585
586 ptr[0] = name_len + 1;
587
588 memcpy(ptr + 2, hdev->dev_name, name_len);
589
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
592 }
593
594 return ad_len;
595 }
596
/* Queue an HCI command updating the LE scan response data, but only
 * when LE is enabled and the data actually changed since the last
 * update.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
621
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
623 {
624 struct pending_cmd *cmd;
625
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
628 */
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
630 if (cmd) {
631 struct mgmt_mode *cp = cmd->param;
632 if (cp->val == 0x01)
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
636 } else {
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
641 }
642
643 return 0;
644 }
645
646 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
647 {
648 u8 ad_len = 0, flags = 0;
649
650 flags |= get_adv_discov_flags(hdev);
651
652 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
653 flags |= LE_AD_NO_BREDR;
654
655 if (flags) {
656 BT_DBG("adv flags 0x%02x", flags);
657
658 ptr[0] = 2;
659 ptr[1] = EIR_FLAGS;
660 ptr[2] = flags;
661
662 ad_len += 3;
663 ptr += 3;
664 }
665
666 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
667 ptr[0] = 2;
668 ptr[1] = EIR_TX_POWER;
669 ptr[2] = (u8) hdev->adv_tx_power;
670
671 ad_len += 3;
672 ptr += 3;
673 }
674
675 return ad_len;
676 }
677
/* Queue an HCI command updating the LE advertising data, but only when
 * LE is enabled and the data actually changed since the last update.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
702
/* Assemble the extended inquiry response data into @data: local name,
 * inquiry TX power, device ID and the registered service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	/* TX power field: length, type, value */
	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product and version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill whatever space remains with the service UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
750
/* Queue a Write EIR command when the computed extended inquiry
 * response differs from what was last written. Requires a powered
 * device with EIR support, SSP enabled and no pending service cache
 * flush.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the command if the controller already has this data */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	/* Remember what was sent so the next update can compare */
	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
779
780 static u8 get_service_classes(struct hci_dev *hdev)
781 {
782 struct bt_uuid *uuid;
783 u8 val = 0;
784
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
787
788 return val;
789 }
790
/* Queue a Write Class of Device command when the computed class
 * differs from the current one. Requires a powered device with BR/EDR
 * enabled and no pending service cache flush.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Reflect limited discoverable mode in the class bits */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the command if the class is unchanged */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
819
820 static u8 get_adv_type(struct hci_dev *hdev)
821 {
822 struct pending_cmd *cmd;
823 bool connectable;
824
825 /* If there's a pending mgmt command the flag will not yet have
826 * it's final value, so check for this first.
827 */
828 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829 if (cmd) {
830 struct mgmt_mode *cp = cmd->param;
831 connectable = !!cp->val;
832 } else {
833 connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
834 }
835
836 return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
837 }
838
/* Queue the HCI commands that configure and enable LE advertising */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;

	memset(&cp, 0, sizeof(cp));

	/* This may queue commands programming a fresh random address
	 * (see the RPA note in rpa_expired()); abort on failure.
	 */
	if (hci_update_random_address(req, &own_addr_type) < 0)
		return;

	/* 0x0800 * 0.625 ms = 1.28 s advertising interval */
	cp.min_interval = __constant_cpu_to_le16(0x0800);
	cp.max_interval = __constant_cpu_to_le16(0x0800);
	cp.type = get_adv_type(hdev);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
860
/* Queue the HCI command that turns LE advertising off */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
867
/* Delayed work that expires the service cache: once the cache flag is
 * cleared, the EIR data and class of device are brought up to date.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache flag was already clear */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
888
/* Delayed work run when the resolvable private address expires: mark
 * it expired and, if the device is advertising without LE connections,
 * restart advertising so a fresh RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
914
/* One-time mgmt initialisation of a controller, performed when the
 * first mgmt command addresses it; the HCI_MGMT flag guards against
 * running this twice.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
930
/* Handler for the Read Controller Information command: reply with the
 * address, version, supported/current settings, class and names of the
 * controller.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
960
961 static void mgmt_pending_free(struct pending_cmd *cmd)
962 {
963 sock_put(cmd->sk);
964 kfree(cmd->param);
965 kfree(cmd);
966 }
967
968 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
969 struct hci_dev *hdev, void *data,
970 u16 len)
971 {
972 struct pending_cmd *cmd;
973
974 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
975 if (!cmd)
976 return NULL;
977
978 cmd->opcode = opcode;
979 cmd->index = hdev->id;
980
981 cmd->param = kmalloc(len, GFP_KERNEL);
982 if (!cmd->param) {
983 kfree(cmd);
984 return NULL;
985 }
986
987 if (data)
988 memcpy(cmd->param, data, len);
989
990 cmd->sk = sk;
991 sock_hold(sk);
992
993 list_add(&cmd->list, &hdev->mgmt_pending);
994
995 return cmd;
996 }
997
998 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
999 void (*cb)(struct pending_cmd *cmd,
1000 void *data),
1001 void *data)
1002 {
1003 struct pending_cmd *cmd, *tmp;
1004
1005 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1006 if (opcode > 0 && cmd->opcode != opcode)
1007 continue;
1008
1009 cb(cmd, data);
1010 }
1011 }
1012
/* Unlink a pending command from its device list and release it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1018
1019 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1020 {
1021 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1022
1023 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1024 sizeof(settings));
1025 }
1026
/* Handler for the Set Powered command: power the controller up or down
 * asynchronously. The mgmt reply is sent when the state change
 * completes, or immediately when nothing changes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If the controller was about to be auto-powered-off, cancel
	 * that; powering on then only needs the mgmt bookkeeping since
	 * HCI_UP is still set.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches the current one */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The actual power change runs from the request workqueue */
	if (cp->val)
		queue_work(hdev->req_workqueue, &hdev->power_on);
	else
		queue_work(hdev->req_workqueue, &hdev->power_off.work);

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1081
/* Build an mgmt event packet and broadcast it to all mgmt control
 * sockets except @skip_sk. A NULL @hdev addresses the global
 * (non-controller) index.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
1111
1112 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1113 {
1114 __le32 ev;
1115
1116 ev = cpu_to_le32(get_current_settings(hdev));
1117
1118 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1119 }
1120
/* Context handed to mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first matching socket, set by callback */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1126
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, remember the first originating socket (taking a
 * reference) in the lookup context, and free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1142
1143 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1144 {
1145 u8 *status = data;
1146
1147 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1148 mgmt_pending_remove(cmd);
1149 }
1150
1151 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1152 {
1153 if (!lmp_bredr_capable(hdev))
1154 return MGMT_STATUS_NOT_SUPPORTED;
1155 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1156 return MGMT_STATUS_REJECTED;
1157 else
1158 return MGMT_STATUS_SUCCESS;
1159 }
1160
1161 static u8 mgmt_le_support(struct hci_dev *hdev)
1162 {
1163 if (!lmp_le_capable(hdev))
1164 return MGMT_STATUS_NOT_SUPPORTED;
1165 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1166 return MGMT_STATUS_REJECTED;
1167 else
1168 return MGMT_STATUS_SUCCESS;
1169 }
1170
/* Called when the HCI request issued for Set Discoverable finishes:
 * reply to the pending mgmt command, update the discoverable flag and
 * its timeout, and refresh the class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	/* On failure, report the translated status and make sure the
	 * limited discoverable flag is not left set.
	 */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the work that ends the discoverable period */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	/* Only broadcast New Settings if the flag actually flipped */
	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1227
/* Set Discoverable mgmt command handler.
 *
 * @data: struct mgmt_cp_set_discoverable — val 0x00 (off), 0x01
 *        (general) or 0x02 (limited); timeout in seconds.
 *
 * Responds immediately when no HCI traffic is needed (powered off, or
 * only the timeout changes); otherwise builds an HCI request (IAC LAP,
 * scan enable, advertising data) and completes asynchronously from
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled by the host */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Only one discoverable/connectable transaction at a time */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to be connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only toggle the stored flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		/* Parameter length: one count byte plus 3 bytes per LAP */
		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1392
1393 static void write_fast_connectable(struct hci_request *req, bool enable)
1394 {
1395 struct hci_dev *hdev = req->hdev;
1396 struct hci_cp_write_page_scan_activity acp;
1397 u8 type;
1398
1399 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1400 return;
1401
1402 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1403 return;
1404
1405 if (enable) {
1406 type = PAGE_SCAN_TYPE_INTERLACED;
1407
1408 /* 160 msec page scan interval */
1409 acp.interval = __constant_cpu_to_le16(0x0100);
1410 } else {
1411 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1412
1413 /* default 1.28 sec page scan */
1414 acp.interval = __constant_cpu_to_le16(0x0800);
1415 }
1416
1417 acp.window = __constant_cpu_to_le16(0x0012);
1418
1419 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1420 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1421 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1422 sizeof(acp), &acp);
1423
1424 if (hdev->page_scan_type != type)
1425 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1426 }
1427
/* Request-complete callback for Set Connectable: on success sync the
 * HCI_CONNECTABLE flag with the requested mode, answer the pending mgmt
 * command and emit New Settings if the flag changed; on failure just
 * fail the pending command.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1465
1466 static int set_connectable_update_settings(struct hci_dev *hdev,
1467 struct sock *sk, u8 val)
1468 {
1469 bool changed = false;
1470 int err;
1471
1472 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1473 changed = true;
1474
1475 if (val) {
1476 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1477 } else {
1478 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1479 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1480 }
1481
1482 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1483 if (err < 0)
1484 return err;
1485
1486 if (changed)
1487 return new_settings(hdev, sk);
1488
1489 return 0;
1490 }
1491
/* Set Connectable mgmt command handler.
 *
 * @data: struct mgmt_mode (val 0x00 or 0x01).
 *
 * Powered off is handled entirely in software via
 * set_connectable_update_settings(); otherwise an HCI request is built
 * (scan enable / advertising data / fast-connectable rollback) and
 * completed asynchronously from set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* At least one of LE or BR/EDR must be enabled by the host */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Only one discoverable/connectable transaction at a time */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Going non-connectable ends discoverability,
			 * so stop a pending discoverable timeout too.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Restart advertising so it picks up the new connectable
	 * setting, but only while no LE connections exist.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    hci_conn_num(hdev, LE_LINK) == 0) {
		disable_advertising(&req);
		enable_advertising(&req);
	}

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: the request ended up empty, so fall back to
		 * the software-only settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1586
1587 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1588 u16 len)
1589 {
1590 struct mgmt_mode *cp = data;
1591 bool changed;
1592 int err;
1593
1594 BT_DBG("request for %s", hdev->name);
1595
1596 if (cp->val != 0x00 && cp->val != 0x01)
1597 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1598 MGMT_STATUS_INVALID_PARAMS);
1599
1600 hci_dev_lock(hdev);
1601
1602 if (cp->val)
1603 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1604 else
1605 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1606
1607 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1608 if (err < 0)
1609 goto unlock;
1610
1611 if (changed)
1612 err = new_settings(hdev, sk);
1613
1614 unlock:
1615 hci_dev_unlock(hdev);
1616 return err;
1617 }
1618
/* Set Link Security mgmt command handler (BR/EDR authentication).
 *
 * Powered off: only the HCI_LINK_SECURITY flag is toggled. Powered on:
 * sends HCI Write Auth Enable unless the controller's HCI_AUTH state
 * already matches; completion is reported via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just toggle the stored flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller is already in the requested auth mode */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688
/* Set Secure Simple Pairing mgmt command handler.
 *
 * Powered off: toggle HCI_SSP_ENABLED in software; disabling SSP also
 * clears HCI_HS_ENABLED (High Speed depends on SSP). Powered on: send
 * HCI Write Simple Pairing Mode unless the flag already matches.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			/* HS cannot stay enabled without SSP; report a
			 * change if either flag was cleared.
			 */
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested mode already active: just confirm the settings */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1766
/* Set High Speed mgmt command handler. Purely host-side: toggles
 * HCI_HS_ENABLED without any HCI traffic. Requires SSP to be enabled;
 * disabling HS while the adapter is powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1817
/* Request-complete callback for Set LE.
 *
 * On failure all pending Set LE commands are failed with the mapped
 * status. On success every pending command gets a settings response, a
 * single New Settings event is broadcast (skipping the first responder,
 * tracked by settings_rsp() in @match), and — when LE was enabled — the
 * advertising and scan response data are refreshed.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1855
/* Set Low Energy mgmt command handler.
 *
 * Powered off, or when the controller's host-LE state already matches:
 * only the HCI_LE_ENABLED (and, when disabling, HCI_ADVERTISING) flags
 * are toggled. Otherwise HCI Write LE Host Supported is sent (plus
 * disabling advertising first when turning LE off), completing from
 * le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI traffic needed: update flags in software only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Advertising cannot survive LE being disabled */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1944
1945 /* This is a helper function to test for pending mgmt commands that can
1946 * cause CoD or EIR HCI commands. We can only allow one such pending
1947 * mgmt command at a time since otherwise we cannot easily track what
1948 * the current values are, will be, and based on that calculate if a new
1949 * HCI command needs to be sent and if yes with what value.
1950 */
1951 static bool pending_eir_or_class(struct hci_dev *hdev)
1952 {
1953 struct pending_cmd *cmd;
1954
1955 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1956 switch (cmd->opcode) {
1957 case MGMT_OP_ADD_UUID:
1958 case MGMT_OP_REMOVE_UUID:
1959 case MGMT_OP_SET_DEV_CLASS:
1960 case MGMT_OP_SET_POWERED:
1961 return true;
1962 }
1963 }
1964
1965 return false;
1966 }
1967
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; get_uuid_size() matches the first 12 bytes
 * to decide whether a UUID can be shortened to 16 or 32 bits.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1972
1973 static u8 get_uuid_size(const u8 *uuid)
1974 {
1975 u32 val;
1976
1977 if (memcmp(uuid, bluetooth_base_uuid, 12))
1978 return 128;
1979
1980 val = get_unaligned_le32(&uuid[12]);
1981 if (val > 0xffff)
1982 return 32;
1983
1984 return 16;
1985 }
1986
1987 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1988 {
1989 struct pending_cmd *cmd;
1990
1991 hci_dev_lock(hdev);
1992
1993 cmd = mgmt_pending_find(mgmt_op, hdev);
1994 if (!cmd)
1995 goto unlock;
1996
1997 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1998 hdev->dev_class, 3);
1999
2000 mgmt_pending_remove(cmd);
2001
2002 unlock:
2003 hci_dev_unlock(hdev);
2004 }
2005
/* Request-complete callback for Add UUID: finish the pending command */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2012
/* Add UUID mgmt command handler: store the UUID in hdev->uuids and
 * refresh the class of device and EIR data. If the HCI request turns
 * out to be empty (-ENODATA) the command is completed right away with
 * the current device class; otherwise completion comes from
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing needed to be sent: complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2070
2071 static bool enable_service_cache(struct hci_dev *hdev)
2072 {
2073 if (!hdev_is_powered(hdev))
2074 return false;
2075
2076 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2077 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2078 CACHE_TIMEOUT);
2079 return true;
2080 }
2081
2082 return false;
2083 }
2084
/* Request-complete callback for Remove UUID: finish the pending command */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2091
/* Remove UUID mgmt command handler. An all-zero UUID clears the whole
 * list (and may only arm the service cache instead of touching the
 * controller); otherwise every matching entry is removed. The class of
 * device and EIR data are then refreshed, completing either immediately
 * (-ENODATA, empty request) or from remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero wildcard UUID clears every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed the flush work will do
		 * the controller update later; complete right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing needed to be sent: complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2169
/* Request-complete callback for Set Device Class: finish the pending
 * command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2176
/* Set Device Class mgmt command handler.
 *
 * Validates the reserved bits of major/minor class, stores the new
 * values, and — when powered — pushes the class (and flushed EIR data
 * if the service cache was active) to the controller. Powered off, or
 * when the HCI request is empty (-ENODATA), the command completes
 * immediately with the current device class.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while waiting for the cache-flush work
		 * to finish. NOTE(review): presumably that work takes
		 * hci_dev_lock itself — confirm before changing this.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing needed to be sent: complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2247
2248 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2249 u16 len)
2250 {
2251 struct mgmt_cp_load_link_keys *cp = data;
2252 u16 key_count, expected_len;
2253 bool changed;
2254 int i;
2255
2256 BT_DBG("request for %s", hdev->name);
2257
2258 if (!lmp_bredr_capable(hdev))
2259 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2260 MGMT_STATUS_NOT_SUPPORTED);
2261
2262 key_count = __le16_to_cpu(cp->key_count);
2263
2264 expected_len = sizeof(*cp) + key_count *
2265 sizeof(struct mgmt_link_key_info);
2266 if (expected_len != len) {
2267 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2268 len, expected_len);
2269 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2270 MGMT_STATUS_INVALID_PARAMS);
2271 }
2272
2273 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2274 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2275 MGMT_STATUS_INVALID_PARAMS);
2276
2277 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2278 key_count);
2279
2280 for (i = 0; i < key_count; i++) {
2281 struct mgmt_link_key_info *key = &cp->keys[i];
2282
2283 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2284 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2285 MGMT_STATUS_INVALID_PARAMS);
2286 }
2287
2288 hci_dev_lock(hdev);
2289
2290 hci_link_keys_clear(hdev);
2291
2292 if (cp->debug_keys)
2293 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2294 else
2295 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2296
2297 if (changed)
2298 new_settings(hdev, NULL);
2299
2300 for (i = 0; i < key_count; i++) {
2301 struct mgmt_link_key_info *key = &cp->keys[i];
2302
2303 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2304 key->type, key->pin_len);
2305 }
2306
2307 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2308
2309 hci_dev_unlock(hdev);
2310
2311 return 0;
2312 }
2313
2314 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315 u8 addr_type, struct sock *skip_sk)
2316 {
2317 struct mgmt_ev_device_unpaired ev;
2318
2319 bacpy(&ev.addr.bdaddr, bdaddr);
2320 ev.addr.type = addr_type;
2321
2322 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2323 skip_sk);
2324 }
2325
/* Unpair Device mgmt command handler.
 *
 * Removes the stored link key (BR/EDR) or IRK+LTK (LE) for the given
 * address. When @cp->disconnect is set and a connection exists, an HCI
 * Disconnect is sent and the command completes once that finishes;
 * otherwise it completes immediately and a Device Unpaired event is
 * broadcast to the other mgmt sockets.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The response always carries the address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No stored key found means the device was never paired */
	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: complete now and notify other sockets */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2414
/* MGMT_OP_DISCONNECT handler: tear down the ACL (BR/EDR) or LE link to
 * the given address.
 *
 * Sends HCI_OP_DISCONNECT with reason "remote user terminated" and
 * parks the request as a pending command; the final mgmt response is
 * generated once the disconnection completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address back to user space */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED means there is no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2479
2480 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2481 {
2482 switch (link_type) {
2483 case LE_LINK:
2484 switch (addr_type) {
2485 case ADDR_LE_DEV_PUBLIC:
2486 return BDADDR_LE_PUBLIC;
2487
2488 default:
2489 /* Fallback to LE Random address type */
2490 return BDADDR_LE_RANDOM;
2491 }
2492
2493 default:
2494 /* Fallback to BR/EDR type */
2495 return BDADDR_BREDR;
2496 }
2497 }
2498
2499 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2500 u16 data_len)
2501 {
2502 struct mgmt_rp_get_connections *rp;
2503 struct hci_conn *c;
2504 size_t rp_len;
2505 int err;
2506 u16 i;
2507
2508 BT_DBG("");
2509
2510 hci_dev_lock(hdev);
2511
2512 if (!hdev_is_powered(hdev)) {
2513 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2514 MGMT_STATUS_NOT_POWERED);
2515 goto unlock;
2516 }
2517
2518 i = 0;
2519 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2520 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2521 i++;
2522 }
2523
2524 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2525 rp = kmalloc(rp_len, GFP_KERNEL);
2526 if (!rp) {
2527 err = -ENOMEM;
2528 goto unlock;
2529 }
2530
2531 i = 0;
2532 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2533 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2534 continue;
2535 bacpy(&rp->addr[i].bdaddr, &c->dst);
2536 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2537 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2538 continue;
2539 i++;
2540 }
2541
2542 rp->conn_count = cpu_to_le16(i);
2543
2544 /* Recalculate length in case of filtered SCO connections, etc */
2545 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2546
2547 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2548 rp_len);
2549
2550 kfree(rp);
2551
2552 unlock:
2553 hci_dev_unlock(hdev);
2554 return err;
2555 }
2556
2557 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2558 struct mgmt_cp_pin_code_neg_reply *cp)
2559 {
2560 struct pending_cmd *cmd;
2561 int err;
2562
2563 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2564 sizeof(*cp));
2565 if (!cmd)
2566 return -ENOMEM;
2567
2568 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2569 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2570 if (err < 0)
2571 mgmt_pending_remove(cmd);
2572
2573 return err;
2574 }
2575
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 *
 * If the pending security level is high and the PIN is not the full 16
 * bytes, the pairing is rejected with a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only applies to an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Reject on the HCI level first, then report invalid
		 * parameters back to the mgmt caller.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2635
2636 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2637 u16 len)
2638 {
2639 struct mgmt_cp_set_io_capability *cp = data;
2640
2641 BT_DBG("");
2642
2643 hci_dev_lock(hdev);
2644
2645 hdev->io_capability = cp->io_capability;
2646
2647 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2648 hdev->io_capability);
2649
2650 hci_dev_unlock(hdev);
2651
2652 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2653 0);
2654 }
2655
2656 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2657 {
2658 struct hci_dev *hdev = conn->hdev;
2659 struct pending_cmd *cmd;
2660
2661 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2662 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2663 continue;
2664
2665 if (cmd->user_data != conn)
2666 continue;
2667
2668 return cmd;
2669 }
2670
2671 return NULL;
2672 }
2673
/* Finish a PAIR_DEVICE request: emit the mgmt response, detach the
 * pairing callbacks, drop the connection reference taken for the
 * pairing and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2694
2695 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2696 {
2697 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2698 struct pending_cmd *cmd;
2699
2700 cmd = find_pairing(conn);
2701 if (cmd)
2702 pairing_complete(cmd, status);
2703 }
2704
2705 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2706 {
2707 struct pending_cmd *cmd;
2708
2709 BT_DBG("status %u", status);
2710
2711 cmd = find_pairing(conn);
2712 if (!cmd)
2713 BT_DBG("Unable to find a pending command");
2714 else
2715 pairing_complete(cmd, mgmt_status(status));
2716 }
2717
2718 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2719 {
2720 struct pending_cmd *cmd;
2721
2722 BT_DBG("status %u", status);
2723
2724 if (!status)
2725 return;
2726
2727 cmd = find_pairing(conn);
2728 if (!cmd)
2729 BT_DBG("Unable to find a pending command");
2730 else
2731 pairing_complete(cmd, mgmt_status(status));
2732 }
2733
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a
 * remote device over BR/EDR or LE.
 *
 * Creates (or reuses) a connection and hooks the pairing callbacks
 * onto it; the final mgmt response is emitted from pairing_complete()
 * once the procedure finishes, fails or is cancelled.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* io_cap 0x03 selects dedicated bonding without MITM protection
	 * (presumably NoInputNoOutput — TODO confirm against mgmt spec)
	 */
	sec_level = BT_SECURITY_MEDIUM;
	if (cp->io_cap == 0x03)
		auth_type = HCI_AT_DEDICATED_BONDING;
	else
		auth_type = HCI_AT_DEDICATED_BONDING_MITM;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);
	else
		conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
				   cp->addr.type, sec_level, auth_type);

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL confirm callback means a pairing is already in
	 * progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete right away */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2828
2829 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2830 u16 len)
2831 {
2832 struct mgmt_addr_info *addr = data;
2833 struct pending_cmd *cmd;
2834 struct hci_conn *conn;
2835 int err;
2836
2837 BT_DBG("");
2838
2839 hci_dev_lock(hdev);
2840
2841 if (!hdev_is_powered(hdev)) {
2842 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2843 MGMT_STATUS_NOT_POWERED);
2844 goto unlock;
2845 }
2846
2847 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2848 if (!cmd) {
2849 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2850 MGMT_STATUS_INVALID_PARAMS);
2851 goto unlock;
2852 }
2853
2854 conn = cmd->user_data;
2855
2856 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2857 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2858 MGMT_STATUS_INVALID_PARAMS);
2859 goto unlock;
2860 }
2861
2862 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2863
2864 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2865 addr, sizeof(*addr));
2866 unlock:
2867 hci_dev_unlock(hdev);
2868 return err;
2869 }
2870
/* Common helper for the user confirmation / passkey (negative) reply
 * mgmt commands.
 *
 * For LE addresses the response is routed into SMP and answered
 * immediately; for BR/EDR the HCI reply command @hci_op is sent and
 * the mgmt response is deferred via a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2940
2941 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2942 void *data, u16 len)
2943 {
2944 struct mgmt_cp_pin_code_neg_reply *cp = data;
2945
2946 BT_DBG("");
2947
2948 return user_pairing_resp(sk, hdev, &cp->addr,
2949 MGMT_OP_PIN_CODE_NEG_REPLY,
2950 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2951 }
2952
2953 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2954 u16 len)
2955 {
2956 struct mgmt_cp_user_confirm_reply *cp = data;
2957
2958 BT_DBG("");
2959
2960 if (len != sizeof(*cp))
2961 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2962 MGMT_STATUS_INVALID_PARAMS);
2963
2964 return user_pairing_resp(sk, hdev, &cp->addr,
2965 MGMT_OP_USER_CONFIRM_REPLY,
2966 HCI_OP_USER_CONFIRM_REPLY, 0);
2967 }
2968
2969 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2970 void *data, u16 len)
2971 {
2972 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2973
2974 BT_DBG("");
2975
2976 return user_pairing_resp(sk, hdev, &cp->addr,
2977 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2978 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2979 }
2980
2981 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2982 u16 len)
2983 {
2984 struct mgmt_cp_user_passkey_reply *cp = data;
2985
2986 BT_DBG("");
2987
2988 return user_pairing_resp(sk, hdev, &cp->addr,
2989 MGMT_OP_USER_PASSKEY_REPLY,
2990 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2991 }
2992
2993 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2994 void *data, u16 len)
2995 {
2996 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2997
2998 BT_DBG("");
2999
3000 return user_pairing_resp(sk, hdev, &cp->addr,
3001 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3002 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3003 }
3004
3005 static void update_name(struct hci_request *req)
3006 {
3007 struct hci_dev *hdev = req->hdev;
3008 struct hci_cp_write_local_name cp;
3009
3010 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3011
3012 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3013 }
3014
3015 static void set_name_complete(struct hci_dev *hdev, u8 status)
3016 {
3017 struct mgmt_cp_set_local_name *cp;
3018 struct pending_cmd *cmd;
3019
3020 BT_DBG("status 0x%02x", status);
3021
3022 hci_dev_lock(hdev);
3023
3024 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3025 if (!cmd)
3026 goto unlock;
3027
3028 cp = cmd->param;
3029
3030 if (status)
3031 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3032 mgmt_status(status));
3033 else
3034 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3035 cp, sizeof(*cp));
3036
3037 mgmt_pending_remove(cmd);
3038
3039 unlock:
3040 hci_dev_unlock(hdev);
3041 }
3042
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name.
 *
 * An unchanged name or a powered-off controller completes immediately;
 * otherwise the name is pushed to the controller (and into the EIR and
 * scan response data) via an HCI request, with the mgmt response sent
 * from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: just store the new name and notify other mgmt
	 * clients of the change.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3111
3112 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3113 void *data, u16 data_len)
3114 {
3115 struct pending_cmd *cmd;
3116 int err;
3117
3118 BT_DBG("%s", hdev->name);
3119
3120 hci_dev_lock(hdev);
3121
3122 if (!hdev_is_powered(hdev)) {
3123 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3124 MGMT_STATUS_NOT_POWERED);
3125 goto unlock;
3126 }
3127
3128 if (!lmp_ssp_capable(hdev)) {
3129 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3130 MGMT_STATUS_NOT_SUPPORTED);
3131 goto unlock;
3132 }
3133
3134 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3135 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3136 MGMT_STATUS_BUSY);
3137 goto unlock;
3138 }
3139
3140 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3141 if (!cmd) {
3142 err = -ENOMEM;
3143 goto unlock;
3144 }
3145
3146 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3147 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3148 0, NULL);
3149 else
3150 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3151
3152 if (err < 0)
3153 mgmt_pending_remove(cmd);
3154
3155 unlock:
3156 hci_dev_unlock(hdev);
3157 return err;
3158 }
3159
3160 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3161 void *data, u16 len)
3162 {
3163 int err;
3164
3165 BT_DBG("%s ", hdev->name);
3166
3167 hci_dev_lock(hdev);
3168
3169 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3170 struct mgmt_cp_add_remote_oob_data *cp = data;
3171 u8 status;
3172
3173 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3174 cp->hash, cp->randomizer);
3175 if (err < 0)
3176 status = MGMT_STATUS_FAILED;
3177 else
3178 status = MGMT_STATUS_SUCCESS;
3179
3180 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3181 status, &cp->addr, sizeof(cp->addr));
3182 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3183 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3184 u8 status;
3185
3186 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3187 cp->hash192,
3188 cp->randomizer192,
3189 cp->hash256,
3190 cp->randomizer256);
3191 if (err < 0)
3192 status = MGMT_STATUS_FAILED;
3193 else
3194 status = MGMT_STATUS_SUCCESS;
3195
3196 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3197 status, &cp->addr, sizeof(cp->addr));
3198 } else {
3199 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3200 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3201 MGMT_STATUS_INVALID_PARAMS);
3202 }
3203
3204 hci_dev_unlock(hdev);
3205 return err;
3206 }
3207
3208 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3209 void *data, u16 len)
3210 {
3211 struct mgmt_cp_remove_remote_oob_data *cp = data;
3212 u8 status;
3213 int err;
3214
3215 BT_DBG("%s", hdev->name);
3216
3217 hci_dev_lock(hdev);
3218
3219 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3220 if (err < 0)
3221 status = MGMT_STATUS_INVALID_PARAMS;
3222 else
3223 status = MGMT_STATUS_SUCCESS;
3224
3225 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3226 status, &cp->addr, sizeof(cp->addr));
3227
3228 hci_dev_unlock(hdev);
3229 return err;
3230 }
3231
3232 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3233 {
3234 struct pending_cmd *cmd;
3235 u8 type;
3236 int err;
3237
3238 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3239
3240 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3241 if (!cmd)
3242 return -ENOENT;
3243
3244 type = hdev->discovery.type;
3245
3246 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3247 &type, sizeof(type));
3248 mgmt_pending_remove(cmd);
3249
3250 return err;
3251 }
3252
3253 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3254 {
3255 BT_DBG("status %d", status);
3256
3257 if (status) {
3258 hci_dev_lock(hdev);
3259 mgmt_start_discovery_failed(hdev, status);
3260 hci_dev_unlock(hdev);
3261 return;
3262 }
3263
3264 hci_dev_lock(hdev);
3265 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3266 hci_dev_unlock(hdev);
3267
3268 switch (hdev->discovery.type) {
3269 case DISCOV_TYPE_LE:
3270 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3271 DISCOV_LE_TIMEOUT);
3272 break;
3273
3274 case DISCOV_TYPE_INTERLEAVED:
3275 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3276 DISCOV_INTERLEAVED_TIMEOUT);
3277 break;
3278
3279 case DISCOV_TYPE_BREDR:
3280 break;
3281
3282 default:
3283 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3284 }
3285 }
3286
/* MGMT_OP_START_DISCOVERY handler: start BR/EDR inquiry, LE scanning
 * or both (interleaved), depending on the requested discovery type.
 *
 * The HCI commands are collected into a request; completion (or
 * failure) of the request is reported via start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and an already running discovery both make a
	 * new discovery request impossible.
	 */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery needs BR/EDR as well */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning is rejected while advertising or while an LE
		 * scan is already running.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		memset(&param_cp, 0, sizeof(param_cp));

		err = hci_update_random_address(&req, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3431
3432 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3433 {
3434 struct pending_cmd *cmd;
3435 int err;
3436
3437 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3438 if (!cmd)
3439 return -ENOENT;
3440
3441 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3442 &hdev->discovery.type, sizeof(hdev->discovery.type));
3443 mgmt_pending_remove(cmd);
3444
3445 return err;
3446 }
3447
3448 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3449 {
3450 BT_DBG("status %d", status);
3451
3452 hci_dev_lock(hdev);
3453
3454 if (status) {
3455 mgmt_stop_discovery_failed(hdev, status);
3456 goto unlock;
3457 }
3458
3459 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3460
3461 unlock:
3462 hci_dev_unlock(hdev);
3463 }
3464
/* MGMT_OP_STOP_DISCOVERY handler: abort an active device discovery.
 *
 * What needs cancelling depends on the current discovery state:
 * FINDING cancels either the inquiry or the LE scan, RESOLVING cancels
 * the outstanding remote name request.  The mgmt response is deferred
 * via a pending command until stop_discovery_complete() runs.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	struct hci_cp_le_set_scan_enable enable_cp;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The caller must name the same discovery type it started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan: stop the auto-disable timer and turn
			 * scanning off explicitly.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);

			memset(&enable_cp, 0, sizeof(enable_cp));
			enable_cp.enable = LE_SCAN_DISABLE;
			hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
				    sizeof(enable_cp), &enable_cp);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		/* No name request pending: nothing to cancel, so the
		 * command can complete right away.
		 */
		if (!e) {
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3556
3557 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3558 u16 len)
3559 {
3560 struct mgmt_cp_confirm_name *cp = data;
3561 struct inquiry_entry *e;
3562 int err;
3563
3564 BT_DBG("%s", hdev->name);
3565
3566 hci_dev_lock(hdev);
3567
3568 if (!hci_discovery_active(hdev)) {
3569 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3570 MGMT_STATUS_FAILED);
3571 goto failed;
3572 }
3573
3574 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3575 if (!e) {
3576 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3577 MGMT_STATUS_INVALID_PARAMS);
3578 goto failed;
3579 }
3580
3581 if (cp->name_known) {
3582 e->name_state = NAME_KNOWN;
3583 list_del(&e->list);
3584 } else {
3585 e->name_state = NAME_NEEDED;
3586 hci_inquiry_cache_update_resolve(hdev, e);
3587 }
3588
3589 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3590 sizeof(cp->addr));
3591
3592 failed:
3593 hci_dev_unlock(hdev);
3594 return err;
3595 }
3596
3597 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3598 u16 len)
3599 {
3600 struct mgmt_cp_block_device *cp = data;
3601 u8 status;
3602 int err;
3603
3604 BT_DBG("%s", hdev->name);
3605
3606 if (!bdaddr_type_is_valid(cp->addr.type))
3607 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3608 MGMT_STATUS_INVALID_PARAMS,
3609 &cp->addr, sizeof(cp->addr));
3610
3611 hci_dev_lock(hdev);
3612
3613 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3614 if (err < 0)
3615 status = MGMT_STATUS_FAILED;
3616 else
3617 status = MGMT_STATUS_SUCCESS;
3618
3619 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3620 &cp->addr, sizeof(cp->addr));
3621
3622 hci_dev_unlock(hdev);
3623
3624 return err;
3625 }
3626
3627 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3628 u16 len)
3629 {
3630 struct mgmt_cp_unblock_device *cp = data;
3631 u8 status;
3632 int err;
3633
3634 BT_DBG("%s", hdev->name);
3635
3636 if (!bdaddr_type_is_valid(cp->addr.type))
3637 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3638 MGMT_STATUS_INVALID_PARAMS,
3639 &cp->addr, sizeof(cp->addr));
3640
3641 hci_dev_lock(hdev);
3642
3643 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3644 if (err < 0)
3645 status = MGMT_STATUS_INVALID_PARAMS;
3646 else
3647 status = MGMT_STATUS_SUCCESS;
3648
3649 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3650 &cp->addr, sizeof(cp->addr));
3651
3652 hci_dev_unlock(hdev);
3653
3654 return err;
3655 }
3656
3657 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3658 u16 len)
3659 {
3660 struct mgmt_cp_set_device_id *cp = data;
3661 struct hci_request req;
3662 int err;
3663 __u16 source;
3664
3665 BT_DBG("%s", hdev->name);
3666
3667 source = __le16_to_cpu(cp->source);
3668
3669 if (source > 0x0002)
3670 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3671 MGMT_STATUS_INVALID_PARAMS);
3672
3673 hci_dev_lock(hdev);
3674
3675 hdev->devid_source = source;
3676 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3677 hdev->devid_product = __le16_to_cpu(cp->product);
3678 hdev->devid_version = __le16_to_cpu(cp->version);
3679
3680 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3681
3682 hci_req_init(&req, hdev);
3683 update_eir(&req);
3684 hci_req_run(&req, NULL);
3685
3686 hci_dev_unlock(hdev);
3687
3688 return err;
3689 }
3690
3691 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3692 {
3693 struct cmd_lookup match = { NULL, hdev };
3694
3695 if (status) {
3696 u8 mgmt_err = mgmt_status(status);
3697
3698 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3699 cmd_status_rsp, &mgmt_err);
3700 return;
3701 }
3702
3703 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3704 &match);
3705
3706 new_settings(hdev, match.sk);
3707
3708 if (match.sk)
3709 sock_put(match.sk);
3710 }
3711
/* Handle the Set Advertising mgmt command: enable or disable LE
 * advertising, either by just toggling the HCI_ADVERTISING flag (when
 * no HCI traffic is possible or required) or by issuing the matching
 * HCI requests and completing asynchronously.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Advertising requires LE to be supported and enabled */
	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	/* Only strict boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast new settings only if the flag was toggled */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another command affecting advertising state is
	 * still in flight.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	/* The pending command is answered in set_advertising_complete() */
	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3789
3790 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3791 void *data, u16 len)
3792 {
3793 struct mgmt_cp_set_static_address *cp = data;
3794 int err;
3795
3796 BT_DBG("%s", hdev->name);
3797
3798 if (!lmp_le_capable(hdev))
3799 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3800 MGMT_STATUS_NOT_SUPPORTED);
3801
3802 if (hdev_is_powered(hdev))
3803 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3804 MGMT_STATUS_REJECTED);
3805
3806 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3807 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3808 return cmd_status(sk, hdev->id,
3809 MGMT_OP_SET_STATIC_ADDRESS,
3810 MGMT_STATUS_INVALID_PARAMS);
3811
3812 /* Two most significant bits shall be set */
3813 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3814 return cmd_status(sk, hdev->id,
3815 MGMT_OP_SET_STATIC_ADDRESS,
3816 MGMT_STATUS_INVALID_PARAMS);
3817 }
3818
3819 hci_dev_lock(hdev);
3820
3821 bacpy(&hdev->static_addr, &cp->bdaddr);
3822
3823 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3824
3825 hci_dev_unlock(hdev);
3826
3827 return err;
3828 }
3829
3830 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3831 void *data, u16 len)
3832 {
3833 struct mgmt_cp_set_scan_params *cp = data;
3834 __u16 interval, window;
3835 int err;
3836
3837 BT_DBG("%s", hdev->name);
3838
3839 if (!lmp_le_capable(hdev))
3840 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3841 MGMT_STATUS_NOT_SUPPORTED);
3842
3843 interval = __le16_to_cpu(cp->interval);
3844
3845 if (interval < 0x0004 || interval > 0x4000)
3846 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3847 MGMT_STATUS_INVALID_PARAMS);
3848
3849 window = __le16_to_cpu(cp->window);
3850
3851 if (window < 0x0004 || window > 0x4000)
3852 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3853 MGMT_STATUS_INVALID_PARAMS);
3854
3855 if (window > interval)
3856 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3857 MGMT_STATUS_INVALID_PARAMS);
3858
3859 hci_dev_lock(hdev);
3860
3861 hdev->le_scan_interval = interval;
3862 hdev->le_scan_window = window;
3863
3864 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3865
3866 hci_dev_unlock(hdev);
3867
3868 return err;
3869 }
3870
3871 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3872 {
3873 struct pending_cmd *cmd;
3874
3875 BT_DBG("status 0x%02x", status);
3876
3877 hci_dev_lock(hdev);
3878
3879 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3880 if (!cmd)
3881 goto unlock;
3882
3883 if (status) {
3884 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3885 mgmt_status(status));
3886 } else {
3887 struct mgmt_mode *cp = cmd->param;
3888
3889 if (cp->val)
3890 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3891 else
3892 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3893
3894 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3895 new_settings(hdev, cmd->sk);
3896 }
3897
3898 mgmt_pending_remove(cmd);
3899
3900 unlock:
3901 hci_dev_unlock(hdev);
3902 }
3903
/* Handle the Set Fast Connectable mgmt command: adjust the page scan
 * parameters so incoming BR/EDR connections are established faster.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Requires BR/EDR enabled and a controller of at least version
	 * 1.2 (checked via hci_ver below).
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Only strict boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	/* Page scan tuning is only meaningful while connectable */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the flag already matches the request */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	/* The flag itself is only updated in fast_connectable_complete()
	 * once the controller accepted the new parameters.
	 */
	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3968
3969 static void set_bredr_scan(struct hci_request *req)
3970 {
3971 struct hci_dev *hdev = req->hdev;
3972 u8 scan = 0;
3973
3974 /* Ensure that fast connectable is disabled. This function will
3975 * not do anything if the page scan parameters are already what
3976 * they should be.
3977 */
3978 write_fast_connectable(req, false);
3979
3980 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3981 scan |= SCAN_PAGE;
3982 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3983 scan |= SCAN_INQUIRY;
3984
3985 if (scan)
3986 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3987 }
3988
3989 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
3990 {
3991 struct pending_cmd *cmd;
3992
3993 BT_DBG("status 0x%02x", status);
3994
3995 hci_dev_lock(hdev);
3996
3997 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
3998 if (!cmd)
3999 goto unlock;
4000
4001 if (status) {
4002 u8 mgmt_err = mgmt_status(status);
4003
4004 /* We need to restore the flag if related HCI commands
4005 * failed.
4006 */
4007 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4008
4009 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4010 } else {
4011 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4012 new_settings(hdev, cmd->sk);
4013 }
4014
4015 mgmt_pending_remove(cmd);
4016
4017 unlock:
4018 hci_dev_unlock(hdev);
4019 }
4020
/* Handle the Set BR/EDR mgmt command: enable or disable BR/EDR support
 * on a dual-mode (BR/EDR + LE) controller. Disabling while powered on
 * is rejected; enabling while powered triggers HCI scan and advertising
 * data updates that complete in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Toggling BR/EDR only makes sense on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay usable, so it has to be enabled first */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	/* Only strict boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the flag already matches the request */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* While powered off only the flags need updating; the
		 * BR/EDR-dependent settings are cleared when disabling.
		 */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	/* On failure set_bredr_complete() restores the flag */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4110
/* Handle the Set Secure Connections mgmt command. Value 0x00 disables
 * SC, 0x01 enables it and 0x02 enables SC-only mode. While powered
 * only the flags are updated; otherwise the support is written to the
 * controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Secure Connections requires BR/EDR support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* Controller must support SC, unless support is being forced
	 * via the HCI_FORCE_SC debug flag.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		/* Powered off: just update HCI_SC_ENABLED/HCI_SC_ONLY */
		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is host-side policy, so the flag is updated as
	 * soon as the command was successfully queued.
	 */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4198
4199 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4200 void *data, u16 len)
4201 {
4202 struct mgmt_mode *cp = data;
4203 bool changed;
4204 int err;
4205
4206 BT_DBG("request for %s", hdev->name);
4207
4208 if (cp->val != 0x00 && cp->val != 0x01)
4209 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4210 MGMT_STATUS_INVALID_PARAMS);
4211
4212 hci_dev_lock(hdev);
4213
4214 if (cp->val)
4215 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4216 else
4217 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4218
4219 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4220 if (err < 0)
4221 goto unlock;
4222
4223 if (changed)
4224 err = new_settings(hdev, sk);
4225
4226 unlock:
4227 hci_dev_unlock(hdev);
4228 return err;
4229 }
4230
4231 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4232 u16 len)
4233 {
4234 struct mgmt_cp_set_privacy *cp = cp_data;
4235 bool changed;
4236 int err;
4237
4238 BT_DBG("request for %s", hdev->name);
4239
4240 if (!lmp_le_capable(hdev))
4241 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4242 MGMT_STATUS_NOT_SUPPORTED);
4243
4244 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4245 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4246 MGMT_STATUS_INVALID_PARAMS);
4247
4248 if (hdev_is_powered(hdev))
4249 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4250 MGMT_STATUS_REJECTED);
4251
4252 hci_dev_lock(hdev);
4253
4254 if (cp->privacy) {
4255 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4256 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4257 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4258 } else {
4259 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4260 memset(hdev->irk, 0, sizeof(hdev->irk));
4261 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4262 }
4263
4264 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4265 if (err < 0)
4266 goto unlock;
4267
4268 if (changed)
4269 err = new_settings(hdev, sk);
4270
4271 unlock:
4272 hci_dev_unlock(hdev);
4273 return err;
4274 }
4275
4276 static bool irk_is_valid(struct mgmt_irk_info *irk)
4277 {
4278 switch (irk->addr.type) {
4279 case BDADDR_LE_PUBLIC:
4280 return true;
4281
4282 case BDADDR_LE_RANDOM:
4283 /* Two most significant bits shall be set */
4284 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4285 return false;
4286 return true;
4287 }
4288
4289 return false;
4290 }
4291
4292 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4293 u16 len)
4294 {
4295 struct mgmt_cp_load_irks *cp = cp_data;
4296 u16 irk_count, expected_len;
4297 int i, err;
4298
4299 BT_DBG("request for %s", hdev->name);
4300
4301 if (!lmp_le_capable(hdev))
4302 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4303 MGMT_STATUS_NOT_SUPPORTED);
4304
4305 irk_count = __le16_to_cpu(cp->irk_count);
4306
4307 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4308 if (expected_len != len) {
4309 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4310 len, expected_len);
4311 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4312 MGMT_STATUS_INVALID_PARAMS);
4313 }
4314
4315 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4316
4317 for (i = 0; i < irk_count; i++) {
4318 struct mgmt_irk_info *key = &cp->irks[i];
4319
4320 if (!irk_is_valid(key))
4321 return cmd_status(sk, hdev->id,
4322 MGMT_OP_LOAD_IRKS,
4323 MGMT_STATUS_INVALID_PARAMS);
4324 }
4325
4326 hci_dev_lock(hdev);
4327
4328 hci_smp_irks_clear(hdev);
4329
4330 for (i = 0; i < irk_count; i++) {
4331 struct mgmt_irk_info *irk = &cp->irks[i];
4332 u8 addr_type;
4333
4334 if (irk->addr.type == BDADDR_LE_PUBLIC)
4335 addr_type = ADDR_LE_DEV_PUBLIC;
4336 else
4337 addr_type = ADDR_LE_DEV_RANDOM;
4338
4339 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4340 BDADDR_ANY);
4341 }
4342
4343 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4344
4345 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4346
4347 hci_dev_unlock(hdev);
4348
4349 return err;
4350 }
4351
4352 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4353 {
4354 if (key->master != 0x00 && key->master != 0x01)
4355 return false;
4356
4357 switch (key->addr.type) {
4358 case BDADDR_LE_PUBLIC:
4359 return true;
4360
4361 case BDADDR_LE_RANDOM:
4362 /* Two most significant bits shall be set */
4363 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4364 return false;
4365 return true;
4366 }
4367
4368 return false;
4369 }
4370
4371 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4372 void *cp_data, u16 len)
4373 {
4374 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4375 u16 key_count, expected_len;
4376 int i, err;
4377
4378 BT_DBG("request for %s", hdev->name);
4379
4380 if (!lmp_le_capable(hdev))
4381 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4382 MGMT_STATUS_NOT_SUPPORTED);
4383
4384 key_count = __le16_to_cpu(cp->key_count);
4385
4386 expected_len = sizeof(*cp) + key_count *
4387 sizeof(struct mgmt_ltk_info);
4388 if (expected_len != len) {
4389 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4390 len, expected_len);
4391 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4392 MGMT_STATUS_INVALID_PARAMS);
4393 }
4394
4395 BT_DBG("%s key_count %u", hdev->name, key_count);
4396
4397 for (i = 0; i < key_count; i++) {
4398 struct mgmt_ltk_info *key = &cp->keys[i];
4399
4400 if (!ltk_is_valid(key))
4401 return cmd_status(sk, hdev->id,
4402 MGMT_OP_LOAD_LONG_TERM_KEYS,
4403 MGMT_STATUS_INVALID_PARAMS);
4404 }
4405
4406 hci_dev_lock(hdev);
4407
4408 hci_smp_ltks_clear(hdev);
4409
4410 for (i = 0; i < key_count; i++) {
4411 struct mgmt_ltk_info *key = &cp->keys[i];
4412 u8 type, addr_type;
4413
4414 if (key->addr.type == BDADDR_LE_PUBLIC)
4415 addr_type = ADDR_LE_DEV_PUBLIC;
4416 else
4417 addr_type = ADDR_LE_DEV_RANDOM;
4418
4419 if (key->master)
4420 type = HCI_SMP_LTK;
4421 else
4422 type = HCI_SMP_LTK_SLAVE;
4423
4424 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4425 key->type, key->val, key->enc_size, key->ediv,
4426 key->rand);
4427 }
4428
4429 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4430 NULL, 0);
4431
4432 hci_dev_unlock(hdev);
4433
4434 return err;
4435 }
4436
/* Command dispatch table, indexed by mgmt opcode. For fixed-length
 * commands the message length must equal data_len exactly; for
 * var_len commands data_len is only a required minimum.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;	/* true: data_len is a minimum, not exact size */
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4493
4494
/* Entry point for mgmt messages received on a control socket: copy the
 * message from the iovec, validate the header, index and parameter
 * length, and dispatch to the handler table.
 *
 * Returns the number of bytes consumed on success or a negative errno;
 * protocol-level errors are reported to the socket via cmd_status()
 * and still count as consumed.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must cover the rest of the message */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup, or claimed by a user
		 * channel, are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not carry
	 * an index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands need an exact match; variable-length ones
	 * only a minimum.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4587
4588 void mgmt_index_added(struct hci_dev *hdev)
4589 {
4590 if (hdev->dev_type != HCI_BREDR)
4591 return;
4592
4593 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4594 }
4595
4596 void mgmt_index_removed(struct hci_dev *hdev)
4597 {
4598 u8 status = MGMT_STATUS_INVALID_INDEX;
4599
4600 if (hdev->dev_type != HCI_BREDR)
4601 return;
4602
4603 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4604
4605 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4606 }
4607
4608 static void powered_complete(struct hci_dev *hdev, u8 status)
4609 {
4610 struct cmd_lookup match = { NULL, hdev };
4611
4612 BT_DBG("status 0x%02x", status);
4613
4614 hci_dev_lock(hdev);
4615
4616 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4617
4618 new_settings(hdev, match.sk);
4619
4620 hci_dev_unlock(hdev);
4621
4622 if (match.sk)
4623 sock_put(match.sk);
4624 }
4625
/* Bring the controller's HCI state in sync with the mgmt flags after
 * powering on: SSP mode, LE host support, advertising data, link
 * security and BR/EDR scan/class/name/EIR settings.
 *
 * Returns the hci_req_run() result; 0 means the request was queued and
 * powered_complete() will run when it finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP on the controller if the flag says so but the
	 * controller does not have it enabled yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the authentication (link security) setting */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4685
/* Notify mgmt about a power state change. When powering on, the actual
 * responses are normally deferred to powered_complete() via
 * powered_update_hci(); otherwise pending commands are answered
 * directly and the new settings are broadcast.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* A return of 0 means the HCI request was queued and
		 * powered_complete() will send the responses.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	/* Powered off: answer Set Powered commands with the new
	 * settings and fail every other pending command.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* A powered-off controller has an all-zero class of device */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
4720
4721 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4722 {
4723 struct pending_cmd *cmd;
4724 u8 status;
4725
4726 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4727 if (!cmd)
4728 return;
4729
4730 if (err == -ERFKILL)
4731 status = MGMT_STATUS_RFKILLED;
4732 else
4733 status = MGMT_STATUS_FAILED;
4734
4735 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4736
4737 mgmt_pending_remove(cmd);
4738 }
4739
/* Called when the discoverable timeout expires: clear the discoverable
 * flags, turn inquiry scan back off and refresh the class and
 * advertising data accordingly.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	/* The class of device carries the limited discoverable bit and
	 * the advertising flags carry the discoverable mode.
	 */
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4770
/* Sync the HCI_DISCOVERABLE mgmt flag with a controller-side change and
 * emit New Settings when the visible state actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable cannot outlive general discoverable,
		 * so drop it unconditionally before testing the main flag.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4803
4804 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4805 {
4806 bool changed;
4807
4808 /* Nothing needed here if there's a pending command since that
4809 * commands request completion callback takes care of everything
4810 * necessary.
4811 */
4812 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4813 return;
4814
4815 if (connectable)
4816 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4817 else
4818 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4819
4820 if (changed)
4821 new_settings(hdev, NULL);
4822 }
4823
4824 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4825 {
4826 u8 mgmt_err = mgmt_status(status);
4827
4828 if (scan & SCAN_PAGE)
4829 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4830 cmd_status_rsp, &mgmt_err);
4831
4832 if (scan & SCAN_INQUIRY)
4833 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4834 cmd_status_rsp, &mgmt_err);
4835 }
4836
4837 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4838 bool persistent)
4839 {
4840 struct mgmt_ev_new_link_key ev;
4841
4842 memset(&ev, 0, sizeof(ev));
4843
4844 ev.store_hint = persistent;
4845 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4846 ev.key.addr.type = BDADDR_BREDR;
4847 ev.key.type = key->type;
4848 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4849 ev.key.pin_len = key->pin_len;
4850
4851 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
4852 }
4853
/* Forward a newly distributed SMP long term key to userspace, advising
 * whether it is worth storing.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	/* Static random addresses have the two most significant address
	 * bits set (0b11), which is what the 0xc0 mask checks.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = 0x01;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;

	/* NOTE(review): HCI_SMP_LTK appears to denote the master-role
	 * key — confirm against smp.h before relying on this.
	 */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.rand, key->rand, sizeof(key->rand));
	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
4891
/* Forward a newly received identity resolving key to userspace together
 * with the resolvable private address it was received under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
4921
4922 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
4923 u8 data_len)
4924 {
4925 eir[eir_len++] = sizeof(type) + data_len;
4926 eir[eir_len++] = type;
4927 memcpy(&eir[eir_len], data, data_len);
4928 eir_len += data_len;
4929
4930 return eir_len;
4931 }
4932
/* Emit a Device Connected event, packing the remote name and class of
 * device (when known) as EIR fields after the fixed-size header.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	/* 512 bytes holds the fixed header plus a maximum-size name
	 * field (name_len is a u8, so at most 255 + 2 bytes) and a
	 * 5-byte class-of-device field.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* An all-zero class of device carries no information; skip it. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	/* Only the used portion of the EIR buffer is sent. */
	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
4959
4960 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
4961 {
4962 struct mgmt_cp_disconnect *cp = cmd->param;
4963 struct sock **sk = data;
4964 struct mgmt_rp_disconnect rp;
4965
4966 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4967 rp.addr.type = cp->addr.type;
4968
4969 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
4970 sizeof(rp));
4971
4972 *sk = cmd->sk;
4973 sock_hold(*sk);
4974
4975 mgmt_pending_remove(cmd);
4976 }
4977
/* Pending-command iterator callback: complete an Unpair Device command
 * and emit the corresponding Device Unpaired notification.
 */
static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct mgmt_rp_unpair_device rp;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* cmd->sk is passed along — presumably so device_unpaired() can
	 * skip the requesting socket; verify against its definition.
	 */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
4994
/* Emit a Device Disconnected event and resolve any pending Disconnect
 * and Unpair Device commands waiting on it.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only ACL and LE links are reported through mgmt. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands first; disconnect_rsp
	 * hands back a referenced socket which is then passed to
	 * mgmt_event() below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp. */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5018
5019 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5020 u8 link_type, u8 addr_type, u8 status)
5021 {
5022 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5023 struct mgmt_cp_disconnect *cp;
5024 struct mgmt_rp_disconnect rp;
5025 struct pending_cmd *cmd;
5026
5027 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5028 hdev);
5029
5030 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5031 if (!cmd)
5032 return;
5033
5034 cp = cmd->param;
5035
5036 if (bacmp(bdaddr, &cp->addr.bdaddr))
5037 return;
5038
5039 if (cp->addr.type != bdaddr_type)
5040 return;
5041
5042 bacpy(&rp.addr.bdaddr, bdaddr);
5043 rp.addr.type = bdaddr_type;
5044
5045 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5046 mgmt_status(status), &rp, sizeof(rp));
5047
5048 mgmt_pending_remove(cmd);
5049 }
5050
5051 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5052 u8 addr_type, u8 status)
5053 {
5054 struct mgmt_ev_connect_failed ev;
5055
5056 bacpy(&ev.addr.bdaddr, bdaddr);
5057 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5058 ev.status = mgmt_status(status);
5059
5060 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5061 }
5062
/* Ask userspace for a PIN code; @secure indicates a 16-digit secure PIN
 * is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	/* PIN pairing is BR/EDR only, so the address type is fixed. */
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
5073
/* Complete a pending PIN Code Reply command once the controller has
 * acknowledged it.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	/* PIN codes only exist on BR/EDR links. */
	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5092
/* Complete a pending PIN Code Negative Reply command once the controller
 * has acknowledged it.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_pin_code_reply rp;

	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	/* PIN codes only exist on BR/EDR links. */
	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = BDADDR_BREDR;

	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
5111
5112 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5113 u8 link_type, u8 addr_type, __le32 value,
5114 u8 confirm_hint)
5115 {
5116 struct mgmt_ev_user_confirm_request ev;
5117
5118 BT_DBG("%s", hdev->name);
5119
5120 bacpy(&ev.addr.bdaddr, bdaddr);
5121 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5122 ev.confirm_hint = confirm_hint;
5123 ev.value = value;
5124
5125 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5126 NULL);
5127 }
5128
5129 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5130 u8 link_type, u8 addr_type)
5131 {
5132 struct mgmt_ev_user_passkey_request ev;
5133
5134 BT_DBG("%s", hdev->name);
5135
5136 bacpy(&ev.addr.bdaddr, bdaddr);
5137 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5138
5139 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5140 NULL);
5141 }
5142
/* Common completion helper for the four user confirm/passkey reply
 * variants: finish the pending command identified by @opcode with the
 * translated HCI status and the peer address.
 *
 * Returns -ENOENT if no such command is pending, otherwise the result
 * of cmd_complete().
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct pending_cmd *cmd;
	struct mgmt_rp_user_confirm_reply rp;
	int err;

	cmd = mgmt_pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = link_to_bdaddr(link_type, addr_type);
	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
			   &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

	return err;
}
5164
/* Completion hook for User Confirm Reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
5171
/* Completion hook for User Confirm Negative Reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
5179
/* Completion hook for User Passkey Reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
5186
/* Completion hook for User Passkey Negative Reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
5194
5195 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5196 u8 link_type, u8 addr_type, u32 passkey,
5197 u8 entered)
5198 {
5199 struct mgmt_ev_passkey_notify ev;
5200
5201 BT_DBG("%s", hdev->name);
5202
5203 bacpy(&ev.addr.bdaddr, bdaddr);
5204 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5205 ev.passkey = __cpu_to_le32(passkey);
5206 ev.entered = entered;
5207
5208 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5209 }
5210
5211 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5212 u8 addr_type, u8 status)
5213 {
5214 struct mgmt_ev_auth_failed ev;
5215
5216 bacpy(&ev.addr.bdaddr, bdaddr);
5217 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5218 ev.status = mgmt_status(status);
5219
5220 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5221 }
5222
/* Completion handler for the controller's authentication-enable change:
 * resolve pending Set Link Security commands and sync the mgmt flag
 * with the controller's HCI_AUTH state.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail every pending Set Link Security command. */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt-visible setting. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* match.sk is set (and referenced) by the settings_rsp walk;
	 * release it here.
	 */
	if (match.sk)
		sock_put(match.sk);
}
5251
5252 static void clear_eir(struct hci_request *req)
5253 {
5254 struct hci_dev *hdev = req->hdev;
5255 struct hci_cp_write_eir cp;
5256
5257 if (!lmp_ext_inq_capable(hdev))
5258 return;
5259
5260 memset(hdev->eir, 0, sizeof(hdev->eir));
5261
5262 memset(&cp, 0, sizeof(cp));
5263
5264 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5265 }
5266
/* Completion handler for an SSP mode change: resolve pending Set SSP
 * commands, sync the HCI_SSP_ENABLED/HCI_HS_ENABLED flags and rewrite
 * or clear the extended inquiry response data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable may leave a stale flag behind; clearing
		 * it (and the dependent HS flag) warrants a New Settings
		 * event.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also clears High Speed; report a change
		 * if either flag actually flipped.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken by the settings_rsp walk. */
	if (match.sk)
		sock_put(match.sk);

	/* EIR data is rewritten while SSP is on and cleared otherwise. */
	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5315
/* Completion handler for a Secure Connections mode change: resolve
 * pending Set Secure Connections commands and sync the HCI_SC_ENABLED/
 * HCI_SC_ONLY flags with the outcome.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable may leave stale flags behind; clearing
		 * the enabled flag warrants a New Settings event, and
		 * SC-only mode cannot survive without SC.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken by the settings_rsp walk. */
	if (match.sk)
		sock_put(match.sk);
}
5352
5353 static void sk_lookup(struct pending_cmd *cmd, void *data)
5354 {
5355 struct cmd_lookup *match = data;
5356
5357 if (match->sk == NULL) {
5358 match->sk = cmd->sk;
5359 sock_hold(match->sk);
5360 }
5361 }
5362
/* Completion handler for a class-of-device update (triggered by Set Dev
 * Class, Add UUID or Remove UUID): broadcast the new class on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup collects a reference on the first socket with one of
	 * these pending commands; it is only released below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	/* The event payload is the raw 3-byte class of device. */
	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	if (match.sk)
		sock_put(match.sk);
}
5379
/* Completion handler for a local name change: keep the cached name in
 * sync and emit Local Name Changed unless the write was part of the
 * power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command: the change did not come through
		 * mgmt, so update the cached device name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any) when broadcasting. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5406
/* Deliver the result of a Read Local OOB Data command. When Secure
 * Connections is enabled and 256-bit values are available, the extended
 * response (192-bit plus 256-bit hash/randomizer pairs) is used;
 * otherwise the legacy 192-bit-only format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended format: both 192- and 256-bit values. */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy format: 192-bit values only. */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5453
/* Report a discovered remote device to userspace, resolving the address
 * via stored IRKs when possible and folding class-of-device data into
 * the EIR payload.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
		       ssp, u8 *eir, u16 eir_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Found events are only of interest while discovery is active. */
	if (!hci_discovery_active(hdev))
		return;

	/* Leave 5 bytes for a potential CoD field */
	if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If an identity resolving key matches, report the device under
	 * its identity address rather than the address seen on air.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	/* Flag devices whose name should be confirmed and those that
	 * cannot do SSP (legacy pairing).
	 */
	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR doesn't already
	 * carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);
	ev_size = sizeof(*ev) + eir_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5499
5500 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5501 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5502 {
5503 struct mgmt_ev_device_found *ev;
5504 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5505 u16 eir_len;
5506
5507 ev = (struct mgmt_ev_device_found *) buf;
5508
5509 memset(buf, 0, sizeof(buf));
5510
5511 bacpy(&ev->addr.bdaddr, bdaddr);
5512 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5513 ev->rssi = rssi;
5514
5515 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5516 name_len);
5517
5518 ev->eir_len = cpu_to_le16(eir_len);
5519
5520 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5521 }
5522
/* Report a discovery state transition: complete the pending Start/Stop
 * Discovery command, if any, and broadcast the Discovering event.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	/* Entering discovery completes Start Discovery, leaving it
	 * completes Stop Discovery.
	 */
	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		/* Both commands answer with the discovery type. */
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
5549
5550 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5551 {
5552 struct pending_cmd *cmd;
5553 struct mgmt_ev_device_blocked ev;
5554
5555 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5556
5557 bacpy(&ev.addr.bdaddr, bdaddr);
5558 ev.addr.type = type;
5559
5560 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5561 cmd ? cmd->sk : NULL);
5562 }
5563
5564 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5565 {
5566 struct pending_cmd *cmd;
5567 struct mgmt_ev_device_unblocked ev;
5568
5569 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5570
5571 bacpy(&ev.addr.bdaddr, bdaddr);
5572 ev.addr.type = type;
5573
5574 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5575 cmd ? cmd->sk : NULL);
5576 }
5577
5578 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5579 {
5580 BT_DBG("%s status %u", hdev->name, status);
5581
5582 /* Clear the advertising mgmt setting if we failed to re-enable it */
5583 if (status) {
5584 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5585 new_settings(hdev, NULL);
5586 }
5587 }
5588
/* Re-enable LE advertising once no LE connections remain, provided the
 * mgmt advertising setting is still on.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	/* Advertising stays off while any LE connection exists. */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);

	/* If this fails we have no option but to let user space know
	 * that we've disabled advertising.
	 */
	if (hci_req_run(&req, adv_enable_complete) < 0) {
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		new_settings(hdev, NULL);
	}
}