net/bluetooth/mgmt.c - Bluetooth: Provide high speed configuration option
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
34
35 #define MGMT_VERSION 1
36 #define MGMT_REVISION 3
37
38 static const u16 mgmt_commands[] = {
39 MGMT_OP_READ_INDEX_LIST,
40 MGMT_OP_READ_INFO,
41 MGMT_OP_SET_POWERED,
42 MGMT_OP_SET_DISCOVERABLE,
43 MGMT_OP_SET_CONNECTABLE,
44 MGMT_OP_SET_FAST_CONNECTABLE,
45 MGMT_OP_SET_PAIRABLE,
46 MGMT_OP_SET_LINK_SECURITY,
47 MGMT_OP_SET_SSP,
48 MGMT_OP_SET_HS,
49 MGMT_OP_SET_LE,
50 MGMT_OP_SET_DEV_CLASS,
51 MGMT_OP_SET_LOCAL_NAME,
52 MGMT_OP_ADD_UUID,
53 MGMT_OP_REMOVE_UUID,
54 MGMT_OP_LOAD_LINK_KEYS,
55 MGMT_OP_LOAD_LONG_TERM_KEYS,
56 MGMT_OP_DISCONNECT,
57 MGMT_OP_GET_CONNECTIONS,
58 MGMT_OP_PIN_CODE_REPLY,
59 MGMT_OP_PIN_CODE_NEG_REPLY,
60 MGMT_OP_SET_IO_CAPABILITY,
61 MGMT_OP_PAIR_DEVICE,
62 MGMT_OP_CANCEL_PAIR_DEVICE,
63 MGMT_OP_UNPAIR_DEVICE,
64 MGMT_OP_USER_CONFIRM_REPLY,
65 MGMT_OP_USER_CONFIRM_NEG_REPLY,
66 MGMT_OP_USER_PASSKEY_REPLY,
67 MGMT_OP_USER_PASSKEY_NEG_REPLY,
68 MGMT_OP_READ_LOCAL_OOB_DATA,
69 MGMT_OP_ADD_REMOTE_OOB_DATA,
70 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
71 MGMT_OP_START_DISCOVERY,
72 MGMT_OP_STOP_DISCOVERY,
73 MGMT_OP_CONFIRM_NAME,
74 MGMT_OP_BLOCK_DEVICE,
75 MGMT_OP_UNBLOCK_DEVICE,
76 MGMT_OP_SET_DEVICE_ID,
77 MGMT_OP_SET_ADVERTISING,
78 };
79
80 static const u16 mgmt_events[] = {
81 MGMT_EV_CONTROLLER_ERROR,
82 MGMT_EV_INDEX_ADDED,
83 MGMT_EV_INDEX_REMOVED,
84 MGMT_EV_NEW_SETTINGS,
85 MGMT_EV_CLASS_OF_DEV_CHANGED,
86 MGMT_EV_LOCAL_NAME_CHANGED,
87 MGMT_EV_NEW_LINK_KEY,
88 MGMT_EV_NEW_LONG_TERM_KEY,
89 MGMT_EV_DEVICE_CONNECTED,
90 MGMT_EV_DEVICE_DISCONNECTED,
91 MGMT_EV_CONNECT_FAILED,
92 MGMT_EV_PIN_CODE_REQUEST,
93 MGMT_EV_USER_CONFIRM_REQUEST,
94 MGMT_EV_USER_PASSKEY_REQUEST,
95 MGMT_EV_AUTH_FAILED,
96 MGMT_EV_DEVICE_FOUND,
97 MGMT_EV_DISCOVERING,
98 MGMT_EV_DEVICE_BLOCKED,
99 MGMT_EV_DEVICE_UNBLOCKED,
100 MGMT_EV_DEVICE_UNPAIRED,
101 MGMT_EV_PASSKEY_NOTIFY,
102 };
103
104 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
105
106 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
107 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
108
109 struct pending_cmd {
110 struct list_head list;
111 u16 opcode;
112 int index;
113 void *param;
114 struct sock *sk;
115 void *user_data;
116 };
117
118 /* HCI to MGMT error code conversion table */
119 static u8 mgmt_status_table[] = {
120 MGMT_STATUS_SUCCESS,
121 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
122 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
123 MGMT_STATUS_FAILED, /* Hardware Failure */
124 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
125 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
126 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
127 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
128 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
129 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
131 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
132 MGMT_STATUS_BUSY, /* Command Disallowed */
133 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
134 MGMT_STATUS_REJECTED, /* Rejected Security */
135 MGMT_STATUS_REJECTED, /* Rejected Personal */
136 MGMT_STATUS_TIMEOUT, /* Host Timeout */
137 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
138 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
139 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
140 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
141 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
142 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
143 MGMT_STATUS_BUSY, /* Repeated Attempts */
144 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
145 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
146 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
147 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
148 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
149 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
150 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
151 MGMT_STATUS_FAILED, /* Unspecified Error */
152 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
153 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
154 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
155 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
156 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
157 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
158 MGMT_STATUS_FAILED, /* Unit Link Key Used */
159 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
160 MGMT_STATUS_TIMEOUT, /* Instant Passed */
161 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
162 MGMT_STATUS_FAILED, /* Transaction Collision */
163 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
164 MGMT_STATUS_REJECTED, /* QoS Rejected */
165 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
166 MGMT_STATUS_REJECTED, /* Insufficient Security */
167 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
168 MGMT_STATUS_BUSY, /* Role Switch Pending */
169 MGMT_STATUS_FAILED, /* Slot Violation */
170 MGMT_STATUS_FAILED, /* Role Switch Failed */
171 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
172 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
173 MGMT_STATUS_BUSY, /* Host Busy Pairing */
174 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
175 MGMT_STATUS_BUSY, /* Controller Busy */
176 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
177 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
178 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
179 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
180 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
181 };
182
183 bool mgmt_valid_hdev(struct hci_dev *hdev)
184 {
185 return hdev->dev_type == HCI_BREDR;
186 }
187
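/* Convert an HCI status code into the corresponding mgmt status using the
 * table above; codes outside the table map to MGMT_STATUS_FAILED.
 */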
188 static u8 mgmt_status(u8 hci_status)
189 {
190 if (hci_status < ARRAY_SIZE(mgmt_status_table))
191 return mgmt_status_table[hci_status];
192
193 return MGMT_STATUS_FAILED;
194 }
195
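/* Report the status of a management command by queueing an
 * MGMT_EV_CMD_STATUS event on the requesting socket.
 */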
196 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
197 {
198 struct sk_buff *skb;
199 struct mgmt_hdr *hdr;
200 struct mgmt_ev_cmd_status *ev;
201 int err;
202
203 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
204
205 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
206 if (!skb)
207 return -ENOMEM;
208
209 hdr = (void *) skb_put(skb, sizeof(*hdr));
210
211 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
212 hdr->index = cpu_to_le16(index);
213 hdr->len = cpu_to_le16(sizeof(*ev));
214
215 ev = (void *) skb_put(skb, sizeof(*ev));
216 ev->status = status;
217 ev->opcode = cpu_to_le16(cmd);
218
219 err = sock_queue_rcv_skb(sk, skb);
220 if (err < 0)
221 kfree_skb(skb);
222
223 return err;
224 }
225
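/* Report completion of a management command by queueing an
 * MGMT_EV_CMD_COMPLETE event, optionally carrying rp_len bytes of
 * command-specific response parameters.
 */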
226 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
227 void *rp, size_t rp_len)
228 {
229 struct sk_buff *skb;
230 struct mgmt_hdr *hdr;
231 struct mgmt_ev_cmd_complete *ev;
232 int err;
233
234 BT_DBG("sock %p", sk);
235
236 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
237 if (!skb)
238 return -ENOMEM;
239
240 hdr = (void *) skb_put(skb, sizeof(*hdr));
241
242 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
243 hdr->index = cpu_to_le16(index);
244 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
245
246 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
247 ev->opcode = cpu_to_le16(cmd);
248 ev->status = status;
249
250 if (rp)
251 memcpy(ev->data, rp, rp_len);
252
253 err = sock_queue_rcv_skb(sk, skb);
254 if (err < 0)
255 kfree_skb(skb);
256
257 return err;
258 }
259
260 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
261 u16 data_len)
262 {
263 struct mgmt_rp_read_version rp;
264
265 BT_DBG("sock %p", sk);
266
267 rp.version = MGMT_VERSION;
268 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
269
270 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
271 sizeof(rp));
272 }
273
274 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
275 u16 data_len)
276 {
277 struct mgmt_rp_read_commands *rp;
278 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
279 const u16 num_events = ARRAY_SIZE(mgmt_events);
280 __le16 *opcode;
281 size_t rp_size;
282 int i, err;
283
284 BT_DBG("sock %p", sk);
285
286 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
287
288 rp = kmalloc(rp_size, GFP_KERNEL);
289 if (!rp)
290 return -ENOMEM;
291
292 rp->num_commands = __constant_cpu_to_le16(num_commands);
293 rp->num_events = __constant_cpu_to_le16(num_events);
294
295 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
296 put_unaligned_le16(mgmt_commands[i], opcode);
297
298 for (i = 0; i < num_events; i++, opcode++)
299 put_unaligned_le16(mgmt_events[i], opcode);
300
301 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
302 rp_size);
303 kfree(rp);
304
305 return err;
306 }
307
308 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
309 u16 data_len)
310 {
311 struct mgmt_rp_read_index_list *rp;
312 struct hci_dev *d;
313 size_t rp_len;
314 u16 count;
315 int err;
316
317 BT_DBG("sock %p", sk);
318
319 read_lock(&hci_dev_list_lock);
320
321 count = 0;
322 list_for_each_entry(d, &hci_dev_list, list) {
323 if (!mgmt_valid_hdev(d))
324 continue;
325
326 count++;
327 }
328
329 rp_len = sizeof(*rp) + (2 * count);
330 rp = kmalloc(rp_len, GFP_ATOMIC);
331 if (!rp) {
332 read_unlock(&hci_dev_list_lock);
333 return -ENOMEM;
334 }
335
336 count = 0;
337 list_for_each_entry(d, &hci_dev_list, list) {
338 if (test_bit(HCI_SETUP, &d->dev_flags))
339 continue;
340
341 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
342 continue;
343
344 if (!mgmt_valid_hdev(d))
345 continue;
346
347 rp->index[count++] = cpu_to_le16(d->id);
348 BT_DBG("Added hci%u", d->id);
349 }
350
351 rp->num_controllers = cpu_to_le16(count);
352 rp_len = sizeof(*rp) + (2 * count);
353
354 read_unlock(&hci_dev_list_lock);
355
356 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
357 rp_len);
358
359 kfree(rp);
360
361 return err;
362 }
363
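/* Build the bitmask of settings this controller can support, derived from
 * its LMP features (SSP, BR/EDR, LE).
 */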
364 static u32 get_supported_settings(struct hci_dev *hdev)
365 {
366 u32 settings = 0;
367
368 settings |= MGMT_SETTING_POWERED;
369 settings |= MGMT_SETTING_PAIRABLE;
370
371 if (lmp_ssp_capable(hdev))
372 settings |= MGMT_SETTING_SSP;
373
374 if (lmp_bredr_capable(hdev)) {
375 settings |= MGMT_SETTING_CONNECTABLE;
376 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377 settings |= MGMT_SETTING_FAST_CONNECTABLE;
378 settings |= MGMT_SETTING_DISCOVERABLE;
379 settings |= MGMT_SETTING_BREDR;
380 settings |= MGMT_SETTING_LINK_SECURITY;
381 settings |= MGMT_SETTING_HS;
382 }
383
384 if (lmp_le_capable(hdev)) {
385 settings |= MGMT_SETTING_LE;
386 settings |= MGMT_SETTING_ADVERTISING;
387 }
388
389 return settings;
390 }
391
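/* Build the bitmask of settings that are currently active, derived from
 * the hdev flags and dev_flags bits.
 */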
392 static u32 get_current_settings(struct hci_dev *hdev)
393 {
394 u32 settings = 0;
395
396 if (hdev_is_powered(hdev))
397 settings |= MGMT_SETTING_POWERED;
398
399 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
400 settings |= MGMT_SETTING_CONNECTABLE;
401
402 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
403 settings |= MGMT_SETTING_FAST_CONNECTABLE;
404
405 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
406 settings |= MGMT_SETTING_DISCOVERABLE;
407
408 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
409 settings |= MGMT_SETTING_PAIRABLE;
410
411 if (lmp_bredr_capable(hdev))
412 settings |= MGMT_SETTING_BREDR;
413
414 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
415 settings |= MGMT_SETTING_LE;
416
417 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
418 settings |= MGMT_SETTING_LINK_SECURITY;
419
420 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
421 settings |= MGMT_SETTING_SSP;
422
423 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
424 settings |= MGMT_SETTING_HS;
425
426 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
427 settings |= MGMT_SETTING_ADVERTISING;
428
429 return settings;
430 }
431
432 #define PNP_INFO_SVCLASS_ID 0x1200
433
434 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
435 {
436 u8 *ptr = data, *uuids_start = NULL;
437 struct bt_uuid *uuid;
438
439 if (len < 4)
440 return ptr;
441
442 list_for_each_entry(uuid, &hdev->uuids, list) {
443 u16 uuid16;
444
445 if (uuid->size != 16)
446 continue;
447
448 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
449 if (uuid16 < 0x1100)
450 continue;
451
452 if (uuid16 == PNP_INFO_SVCLASS_ID)
453 continue;
454
455 if (!uuids_start) {
456 uuids_start = ptr;
457 uuids_start[0] = 1;
458 uuids_start[1] = EIR_UUID16_ALL;
459 ptr += 2;
460 }
461
462 /* Stop if not enough space to put next UUID */
463 if ((ptr - data) + sizeof(u16) > len) {
464 uuids_start[1] = EIR_UUID16_SOME;
465 break;
466 }
467
468 *ptr++ = (uuid16 & 0x00ff);
469 *ptr++ = (uuid16 & 0xff00) >> 8;
470 uuids_start[0] += sizeof(uuid16);
471 }
472
473 return ptr;
474 }
475
476 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
477 {
478 u8 *ptr = data, *uuids_start = NULL;
479 struct bt_uuid *uuid;
480
481 if (len < 6)
482 return ptr;
483
484 list_for_each_entry(uuid, &hdev->uuids, list) {
485 if (uuid->size != 32)
486 continue;
487
488 if (!uuids_start) {
489 uuids_start = ptr;
490 uuids_start[0] = 1;
491 uuids_start[1] = EIR_UUID32_ALL;
492 ptr += 2;
493 }
494
495 /* Stop if not enough space to put next UUID */
496 if ((ptr - data) + sizeof(u32) > len) {
497 uuids_start[1] = EIR_UUID32_SOME;
498 break;
499 }
500
501 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
502 ptr += sizeof(u32);
503 uuids_start[0] += sizeof(u32);
504 }
505
506 return ptr;
507 }
508
509 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
510 {
511 u8 *ptr = data, *uuids_start = NULL;
512 struct bt_uuid *uuid;
513
514 if (len < 18)
515 return ptr;
516
517 list_for_each_entry(uuid, &hdev->uuids, list) {
518 if (uuid->size != 128)
519 continue;
520
521 if (!uuids_start) {
522 uuids_start = ptr;
523 uuids_start[0] = 1;
524 uuids_start[1] = EIR_UUID128_ALL;
525 ptr += 2;
526 }
527
528 /* Stop if not enough space to put next UUID */
529 if ((ptr - data) + 16 > len) {
530 uuids_start[1] = EIR_UUID128_SOME;
531 break;
532 }
533
534 memcpy(ptr, uuid->uuid, 16);
535 ptr += 16;
536 uuids_start[0] += 16;
537 }
538
539 return ptr;
540 }
541
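/* Assemble the extended inquiry response data: local name, TX power,
 * device ID and the registered 16-, 32- and 128-bit UUID lists.
 */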
542 static void create_eir(struct hci_dev *hdev, u8 *data)
543 {
544 u8 *ptr = data;
545 size_t name_len;
546
547 name_len = strlen(hdev->dev_name);
548
549 if (name_len > 0) {
550 /* EIR Data type */
551 if (name_len > 48) {
552 name_len = 48;
553 ptr[1] = EIR_NAME_SHORT;
554 } else
555 ptr[1] = EIR_NAME_COMPLETE;
556
557 /* EIR Data length */
558 ptr[0] = name_len + 1;
559
560 memcpy(ptr + 2, hdev->dev_name, name_len);
561
562 ptr += (name_len + 2);
563 }
564
565 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
566 ptr[0] = 2;
567 ptr[1] = EIR_TX_POWER;
568 ptr[2] = (u8) hdev->inq_tx_power;
569
570 ptr += 3;
571 }
572
573 if (hdev->devid_source > 0) {
574 ptr[0] = 9;
575 ptr[1] = EIR_DEVICE_ID;
576
577 put_unaligned_le16(hdev->devid_source, ptr + 2);
578 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
579 put_unaligned_le16(hdev->devid_product, ptr + 6);
580 put_unaligned_le16(hdev->devid_version, ptr + 8);
581
582 ptr += 10;
583 }
584
585 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
586 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
587 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
588 }
589
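/* Regenerate the EIR data and queue a Write Extended Inquiry Response
 * command, but only when EIR can be used (powered, EIR capable, SSP
 * enabled, no active service cache) and the data actually changed.
 */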
590 static void update_eir(struct hci_request *req)
591 {
592 struct hci_dev *hdev = req->hdev;
593 struct hci_cp_write_eir cp;
594
595 if (!hdev_is_powered(hdev))
596 return;
597
598 if (!lmp_ext_inq_capable(hdev))
599 return;
600
601 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
602 return;
603
604 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
605 return;
606
607 memset(&cp, 0, sizeof(cp));
608
609 create_eir(hdev, cp.data);
610
611 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
612 return;
613
614 memcpy(hdev->eir, cp.data, sizeof(cp.data));
615
616 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
617 }
618
619 static u8 get_service_classes(struct hci_dev *hdev)
620 {
621 struct bt_uuid *uuid;
622 u8 val = 0;
623
624 list_for_each_entry(uuid, &hdev->uuids, list)
625 val |= uuid->svc_hint;
626
627 return val;
628 }
629
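/* Queue a Write Class of Device command when the computed class (minor,
 * major and service-class bits) differs from the current one; skipped
 * while powered off or while the service cache is active.
 */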
630 static void update_class(struct hci_request *req)
631 {
632 struct hci_dev *hdev = req->hdev;
633 u8 cod[3];
634
635 BT_DBG("%s", hdev->name);
636
637 if (!hdev_is_powered(hdev))
638 return;
639
640 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
641 return;
642
643 cod[0] = hdev->minor_class;
644 cod[1] = hdev->major_class;
645 cod[2] = get_service_classes(hdev);
646
647 if (memcmp(cod, hdev->dev_class, 3) == 0)
648 return;
649
650 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
651 }
652
653 static void service_cache_off(struct work_struct *work)
654 {
655 struct hci_dev *hdev = container_of(work, struct hci_dev,
656 service_cache.work);
657 struct hci_request req;
658
659 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
660 return;
661
662 hci_req_init(&req, hdev);
663
664 hci_dev_lock(hdev);
665
666 update_eir(&req);
667 update_class(&req);
668
669 hci_dev_unlock(hdev);
670
671 hci_req_run(&req, NULL);
672 }
673
674 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
675 {
676 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
677 return;
678
679 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
680
681 /* Non-mgmt controlled devices get this bit set
682 * implicitly so that pairing works for them, however
683 * for mgmt we require user-space to explicitly enable
684 * it
685 */
686 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
687 }
688
689 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
690 void *data, u16 data_len)
691 {
692 struct mgmt_rp_read_info rp;
693
694 BT_DBG("sock %p %s", sk, hdev->name);
695
696 hci_dev_lock(hdev);
697
698 memset(&rp, 0, sizeof(rp));
699
700 bacpy(&rp.bdaddr, &hdev->bdaddr);
701
702 rp.version = hdev->hci_ver;
703 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
704
705 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
706 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
707
708 memcpy(rp.dev_class, hdev->dev_class, 3);
709
710 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
711 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
712
713 hci_dev_unlock(hdev);
714
715 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
716 sizeof(rp));
717 }
718
719 static void mgmt_pending_free(struct pending_cmd *cmd)
720 {
721 sock_put(cmd->sk);
722 kfree(cmd->param);
723 kfree(cmd);
724 }
725
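/* Allocate a pending command entry, copy the request parameters and link
 * it into the per-controller mgmt_pending list.
 */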
726 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
727 struct hci_dev *hdev, void *data,
728 u16 len)
729 {
730 struct pending_cmd *cmd;
731
732 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
733 if (!cmd)
734 return NULL;
735
736 cmd->opcode = opcode;
737 cmd->index = hdev->id;
738
739 cmd->param = kmalloc(len, GFP_KERNEL);
740 if (!cmd->param) {
741 kfree(cmd);
742 return NULL;
743 }
744
745 if (data)
746 memcpy(cmd->param, data, len);
747
748 cmd->sk = sk;
749 sock_hold(sk);
750
751 list_add(&cmd->list, &hdev->mgmt_pending);
752
753 return cmd;
754 }
755
756 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
757 void (*cb)(struct pending_cmd *cmd,
758 void *data),
759 void *data)
760 {
761 struct pending_cmd *cmd, *tmp;
762
763 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
764 if (opcode > 0 && cmd->opcode != opcode)
765 continue;
766
767 cb(cmd, data);
768 }
769 }
770
771 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
772 {
773 struct pending_cmd *cmd;
774
775 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
776 if (cmd->opcode == opcode)
777 return cmd;
778 }
779
780 return NULL;
781 }
782
783 static void mgmt_pending_remove(struct pending_cmd *cmd)
784 {
785 list_del(&cmd->list);
786 mgmt_pending_free(cmd);
787 }
788
789 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
790 {
791 __le32 settings = cpu_to_le32(get_current_settings(hdev));
792
793 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
794 sizeof(settings));
795 }
796
797 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
798 u16 len)
799 {
800 struct mgmt_mode *cp = data;
801 struct pending_cmd *cmd;
802 int err;
803
804 BT_DBG("request for %s", hdev->name);
805
806 if (cp->val != 0x00 && cp->val != 0x01)
807 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
808 MGMT_STATUS_INVALID_PARAMS);
809
810 hci_dev_lock(hdev);
811
812 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
813 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
814 MGMT_STATUS_BUSY);
815 goto failed;
816 }
817
818 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819 cancel_delayed_work(&hdev->power_off);
820
821 if (cp->val) {
822 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
823 data, len);
824 err = mgmt_powered(hdev, 1);
825 goto failed;
826 }
827 }
828
829 if (!!cp->val == hdev_is_powered(hdev)) {
830 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
831 goto failed;
832 }
833
834 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
835 if (!cmd) {
836 err = -ENOMEM;
837 goto failed;
838 }
839
840 if (cp->val)
841 queue_work(hdev->req_workqueue, &hdev->power_on);
842 else
843 queue_work(hdev->req_workqueue, &hdev->power_off.work);
844
845 err = 0;
846
847 failed:
848 hci_dev_unlock(hdev);
849 return err;
850 }
851
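/* Broadcast a management event to all control sockets except skip_sk. */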
852 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
853 struct sock *skip_sk)
854 {
855 struct sk_buff *skb;
856 struct mgmt_hdr *hdr;
857
858 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
859 if (!skb)
860 return -ENOMEM;
861
862 hdr = (void *) skb_put(skb, sizeof(*hdr));
863 hdr->opcode = cpu_to_le16(event);
864 if (hdev)
865 hdr->index = cpu_to_le16(hdev->id);
866 else
867 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
868 hdr->len = cpu_to_le16(data_len);
869
870 if (data)
871 memcpy(skb_put(skb, data_len), data, data_len);
872
873 /* Time stamp */
874 __net_timestamp(skb);
875
876 hci_send_to_control(skb, skip_sk);
877 kfree_skb(skb);
878
879 return 0;
880 }
881
882 static int new_settings(struct hci_dev *hdev, struct sock *skip)
883 {
884 __le32 ev;
885
886 ev = cpu_to_le32(get_current_settings(hdev));
887
888 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
889 }
890
891 struct cmd_lookup {
892 struct sock *sk;
893 struct hci_dev *hdev;
894 u8 mgmt_status;
895 };
896
897 static void settings_rsp(struct pending_cmd *cmd, void *data)
898 {
899 struct cmd_lookup *match = data;
900
901 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
902
903 list_del(&cmd->list);
904
905 if (match->sk == NULL) {
906 match->sk = cmd->sk;
907 sock_hold(match->sk);
908 }
909
910 mgmt_pending_free(cmd);
911 }
912
913 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
914 {
915 u8 *status = data;
916
917 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
918 mgmt_pending_remove(cmd);
919 }
920
921 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
922 u16 len)
923 {
924 struct mgmt_cp_set_discoverable *cp = data;
925 struct pending_cmd *cmd;
926 u16 timeout;
927 u8 scan;
928 int err;
929
930 BT_DBG("request for %s", hdev->name);
931
932 if (!lmp_bredr_capable(hdev))
933 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
934 MGMT_STATUS_NOT_SUPPORTED);
935
936 if (cp->val != 0x00 && cp->val != 0x01)
937 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
938 MGMT_STATUS_INVALID_PARAMS);
939
940 timeout = __le16_to_cpu(cp->timeout);
941 if (!cp->val && timeout > 0)
942 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
943 MGMT_STATUS_INVALID_PARAMS);
944
945 hci_dev_lock(hdev);
946
947 if (!hdev_is_powered(hdev) && timeout > 0) {
948 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
949 MGMT_STATUS_NOT_POWERED);
950 goto failed;
951 }
952
953 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
954 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
955 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
956 MGMT_STATUS_BUSY);
957 goto failed;
958 }
959
960 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
961 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
962 MGMT_STATUS_REJECTED);
963 goto failed;
964 }
965
966 if (!hdev_is_powered(hdev)) {
967 bool changed = false;
968
969 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
970 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
971 changed = true;
972 }
973
974 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
975 if (err < 0)
976 goto failed;
977
978 if (changed)
979 err = new_settings(hdev, sk);
980
981 goto failed;
982 }
983
984 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
985 if (hdev->discov_timeout > 0) {
986 cancel_delayed_work(&hdev->discov_off);
987 hdev->discov_timeout = 0;
988 }
989
990 if (cp->val && timeout > 0) {
991 hdev->discov_timeout = timeout;
992 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
993 msecs_to_jiffies(hdev->discov_timeout * 1000));
994 }
995
996 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
997 goto failed;
998 }
999
1000 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1001 if (!cmd) {
1002 err = -ENOMEM;
1003 goto failed;
1004 }
1005
1006 scan = SCAN_PAGE;
1007
1008 if (cp->val)
1009 scan |= SCAN_INQUIRY;
1010 else
1011 cancel_delayed_work(&hdev->discov_off);
1012
1013 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1014 if (err < 0)
1015 mgmt_pending_remove(cmd);
1016
1017 if (cp->val)
1018 hdev->discov_timeout = timeout;
1019
1020 failed:
1021 hci_dev_unlock(hdev);
1022 return err;
1023 }
1024
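/* Adjust page scan type and interval: interlaced scanning with a 160 ms
 * interval when fast connectable is enabled, the standard 1.28 s interval
 * otherwise. Commands are only queued if the parameters actually change.
 */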
1025 static void write_fast_connectable(struct hci_request *req, bool enable)
1026 {
1027 struct hci_dev *hdev = req->hdev;
1028 struct hci_cp_write_page_scan_activity acp;
1029 u8 type;
1030
1031 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1032 return;
1033
1034 if (enable) {
1035 type = PAGE_SCAN_TYPE_INTERLACED;
1036
1037 /* 160 msec page scan interval */
1038 acp.interval = __constant_cpu_to_le16(0x0100);
1039 } else {
1040 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1041
1042 /* default 1.28 sec page scan */
1043 acp.interval = __constant_cpu_to_le16(0x0800);
1044 }
1045
1046 acp.window = __constant_cpu_to_le16(0x0012);
1047
1048 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1049 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1050 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1051 sizeof(acp), &acp);
1052
1053 if (hdev->page_scan_type != type)
1054 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1055 }
1056
1057 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1058 {
1059 struct pending_cmd *cmd;
1060
1061 BT_DBG("status 0x%02x", status);
1062
1063 hci_dev_lock(hdev);
1064
1065 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1066 if (!cmd)
1067 goto unlock;
1068
1069 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1070
1071 mgmt_pending_remove(cmd);
1072
1073 unlock:
1074 hci_dev_unlock(hdev);
1075 }
1076
1077 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1078 u16 len)
1079 {
1080 struct mgmt_mode *cp = data;
1081 struct pending_cmd *cmd;
1082 struct hci_request req;
1083 u8 scan;
1084 int err;
1085
1086 BT_DBG("request for %s", hdev->name);
1087
1088 if (!lmp_bredr_capable(hdev))
1089 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1090 MGMT_STATUS_NOT_SUPPORTED);
1091
1092 if (cp->val != 0x00 && cp->val != 0x01)
1093 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1094 MGMT_STATUS_INVALID_PARAMS);
1095
1096 hci_dev_lock(hdev);
1097
1098 if (!hdev_is_powered(hdev)) {
1099 bool changed = false;
1100
1101 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1102 changed = true;
1103
1104 if (cp->val) {
1105 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1106 } else {
1107 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1108 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1109 }
1110
1111 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1112 if (err < 0)
1113 goto failed;
1114
1115 if (changed)
1116 err = new_settings(hdev, sk);
1117
1118 goto failed;
1119 }
1120
1121 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1122 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1123 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1124 MGMT_STATUS_BUSY);
1125 goto failed;
1126 }
1127
1128 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1129 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1130 goto failed;
1131 }
1132
1133 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1134 if (!cmd) {
1135 err = -ENOMEM;
1136 goto failed;
1137 }
1138
1139 if (cp->val) {
1140 scan = SCAN_PAGE;
1141 } else {
1142 scan = 0;
1143
1144 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1145 hdev->discov_timeout > 0)
1146 cancel_delayed_work(&hdev->discov_off);
1147 }
1148
1149 hci_req_init(&req, hdev);
1150
1151 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1152
1153 /* If we're going from non-connectable to connectable or
1154 * vice-versa when fast connectable is enabled ensure that fast
1155 * connectable gets disabled. write_fast_connectable won't do
1156 * anything if the page scan parameters are already what they
1157 * should be.
1158 */
1159 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1160 write_fast_connectable(&req, false);
1161
1162 err = hci_req_run(&req, set_connectable_complete);
1163 if (err < 0)
1164 mgmt_pending_remove(cmd);
1165
1166 failed:
1167 hci_dev_unlock(hdev);
1168 return err;
1169 }
1170
1171 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1172 u16 len)
1173 {
1174 struct mgmt_mode *cp = data;
1175 int err;
1176
1177 BT_DBG("request for %s", hdev->name);
1178
1179 if (cp->val != 0x00 && cp->val != 0x01)
1180 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1181 MGMT_STATUS_INVALID_PARAMS);
1182
1183 hci_dev_lock(hdev);
1184
1185 if (cp->val)
1186 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1187 else
1188 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1189
1190 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1191 if (err < 0)
1192 goto failed;
1193
1194 err = new_settings(hdev, sk);
1195
1196 failed:
1197 hci_dev_unlock(hdev);
1198 return err;
1199 }
1200
1201 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1202 u16 len)
1203 {
1204 struct mgmt_mode *cp = data;
1205 struct pending_cmd *cmd;
1206 u8 val;
1207 int err;
1208
1209 BT_DBG("request for %s", hdev->name);
1210
1211 if (!lmp_bredr_capable(hdev))
1212 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1213 MGMT_STATUS_NOT_SUPPORTED);
1214
1215 if (cp->val != 0x00 && cp->val != 0x01)
1216 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1217 MGMT_STATUS_INVALID_PARAMS);
1218
1219 hci_dev_lock(hdev);
1220
1221 if (!hdev_is_powered(hdev)) {
1222 bool changed = false;
1223
1224 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1225 &hdev->dev_flags)) {
1226 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1227 changed = true;
1228 }
1229
1230 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1231 if (err < 0)
1232 goto failed;
1233
1234 if (changed)
1235 err = new_settings(hdev, sk);
1236
1237 goto failed;
1238 }
1239
1240 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1241 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1242 MGMT_STATUS_BUSY);
1243 goto failed;
1244 }
1245
1246 val = !!cp->val;
1247
1248 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1249 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1250 goto failed;
1251 }
1252
1253 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1254 if (!cmd) {
1255 err = -ENOMEM;
1256 goto failed;
1257 }
1258
1259 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1260 if (err < 0) {
1261 mgmt_pending_remove(cmd);
1262 goto failed;
1263 }
1264
1265 failed:
1266 hci_dev_unlock(hdev);
1267 return err;
1268 }
1269
1270 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1271 {
1272 struct mgmt_mode *cp = data;
1273 struct pending_cmd *cmd;
1274 u8 val;
1275 int err;
1276
1277 BT_DBG("request for %s", hdev->name);
1278
1279 if (!lmp_ssp_capable(hdev))
1280 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1281 MGMT_STATUS_NOT_SUPPORTED);
1282
1283 if (cp->val != 0x00 && cp->val != 0x01)
1284 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1285 MGMT_STATUS_INVALID_PARAMS);
1286
1287 hci_dev_lock(hdev);
1288
1289 val = !!cp->val;
1290
1291 if (!hdev_is_powered(hdev)) {
1292 bool changed = false;
1293
1294 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1295 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1296 changed = true;
1297 }
1298
1299 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1300 if (err < 0)
1301 goto failed;
1302
1303 if (changed)
1304 err = new_settings(hdev, sk);
1305
1306 goto failed;
1307 }
1308
1309 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1310 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1311 MGMT_STATUS_BUSY);
1312 goto failed;
1313 }
1314
1315 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1316 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1317 goto failed;
1318 }
1319
1320 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1321 if (!cmd) {
1322 err = -ENOMEM;
1323 goto failed;
1324 }
1325
1326 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1327 if (err < 0) {
1328 mgmt_pending_remove(cmd);
1329 goto failed;
1330 }
1331
1332 failed:
1333 hci_dev_unlock(hdev);
1334 return err;
1335 }
1336
1337 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1338 {
1339 struct mgmt_mode *cp = data;
1340
1341 BT_DBG("request for %s", hdev->name);
1342
1343 if (!lmp_bredr_capable(hdev))
1344 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1345 MGMT_STATUS_NOT_SUPPORTED);
1346
1347 if (cp->val != 0x00 && cp->val != 0x01)
1348 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1349 MGMT_STATUS_INVALID_PARAMS);
1350
1351 if (cp->val)
1352 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1353 else
1354 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1355
1356 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1357 }
1358
1359 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1360 {
1361 struct cmd_lookup match = { NULL, hdev };
1362
1363 if (status) {
1364 u8 mgmt_err = mgmt_status(status);
1365
1366 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1367 &mgmt_err);
1368 return;
1369 }
1370
1371 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1372
1373 new_settings(hdev, match.sk);
1374
1375 if (match.sk)
1376 sock_put(match.sk);
1377 }
1378
1379 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1380 {
1381 struct mgmt_mode *cp = data;
1382 struct hci_cp_write_le_host_supported hci_cp;
1383 struct pending_cmd *cmd;
1384 struct hci_request req;
1385 int err;
1386 u8 val, enabled;
1387
1388 BT_DBG("request for %s", hdev->name);
1389
1390 if (!lmp_le_capable(hdev))
1391 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1392 MGMT_STATUS_NOT_SUPPORTED);
1393
1394 if (cp->val != 0x00 && cp->val != 0x01)
1395 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1396 MGMT_STATUS_INVALID_PARAMS);
1397
1398 /* LE-only devices do not allow toggling LE on/off */
1399 if (!lmp_bredr_capable(hdev))
1400 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1401 MGMT_STATUS_REJECTED);
1402
1403 hci_dev_lock(hdev);
1404
1405 val = !!cp->val;
1406 enabled = lmp_host_le_capable(hdev);
1407
1408 if (!hdev_is_powered(hdev) || val == enabled) {
1409 bool changed = false;
1410
1411 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1412 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1413 changed = true;
1414 }
1415
1416 if (!val && test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
1417 clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
1418 changed = true;
1419 }
1420
1421 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1422 if (err < 0)
1423 goto unlock;
1424
1425 if (changed)
1426 err = new_settings(hdev, sk);
1427
1428 goto unlock;
1429 }
1430
1431 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1432 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1433 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1434 MGMT_STATUS_BUSY);
1435 goto unlock;
1436 }
1437
1438 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1439 if (!cmd) {
1440 err = -ENOMEM;
1441 goto unlock;
1442 }
1443
1444 memset(&hci_cp, 0, sizeof(hci_cp));
1445
1446 if (val) {
1447 hci_cp.le = val;
1448 hci_cp.simul = lmp_le_br_capable(hdev);
1449 }
1450
1451 hci_req_init(&req, hdev);
1452
1453 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags) && !val)
1454 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
1455
1456 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1457 &hci_cp);
1458
1459 err = hci_req_run(&req, le_enable_complete);
1460 if (err < 0)
1461 mgmt_pending_remove(cmd);
1462
1463 unlock:
1464 hci_dev_unlock(hdev);
1465 return err;
1466 }
1467
1468 /* This is a helper function to test for pending mgmt commands that can
1469 * cause CoD or EIR HCI commands. We can only allow one such pending
1470 * mgmt command at a time since otherwise we cannot easily track what
1471 * the current values are, will be, and based on that calculate if a new
1472 * HCI command needs to be sent and if yes with what value.
1473 */
1474 static bool pending_eir_or_class(struct hci_dev *hdev)
1475 {
1476 struct pending_cmd *cmd;
1477
1478 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1479 switch (cmd->opcode) {
1480 case MGMT_OP_ADD_UUID:
1481 case MGMT_OP_REMOVE_UUID:
1482 case MGMT_OP_SET_DEV_CLASS:
1483 case MGMT_OP_SET_POWERED:
1484 return true;
1485 }
1486 }
1487
1488 return false;
1489 }
1490
1491 static const u8 bluetooth_base_uuid[] = {
1492 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1493 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1494 };
1495
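/* Classify a 128-bit UUID as a 16-, 32- or 128-bit value by checking
 * whether it is based on the Bluetooth Base UUID.
 */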
1496 static u8 get_uuid_size(const u8 *uuid)
1497 {
1498 u32 val;
1499
1500 if (memcmp(uuid, bluetooth_base_uuid, 12))
1501 return 128;
1502
1503 val = get_unaligned_le32(&uuid[12]);
1504 if (val > 0xffff)
1505 return 32;
1506
1507 return 16;
1508 }
1509
1510 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1511 {
1512 struct pending_cmd *cmd;
1513
1514 hci_dev_lock(hdev);
1515
1516 cmd = mgmt_pending_find(mgmt_op, hdev);
1517 if (!cmd)
1518 goto unlock;
1519
1520 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1521 hdev->dev_class, 3);
1522
1523 mgmt_pending_remove(cmd);
1524
1525 unlock:
1526 hci_dev_unlock(hdev);
1527 }
1528
1529 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1530 {
1531 BT_DBG("status 0x%02x", status);
1532
1533 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1534 }
1535
1536 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1537 {
1538 struct mgmt_cp_add_uuid *cp = data;
1539 struct pending_cmd *cmd;
1540 struct hci_request req;
1541 struct bt_uuid *uuid;
1542 int err;
1543
1544 BT_DBG("request for %s", hdev->name);
1545
1546 hci_dev_lock(hdev);
1547
1548 if (pending_eir_or_class(hdev)) {
1549 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1550 MGMT_STATUS_BUSY);
1551 goto failed;
1552 }
1553
1554 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1555 if (!uuid) {
1556 err = -ENOMEM;
1557 goto failed;
1558 }
1559
1560 memcpy(uuid->uuid, cp->uuid, 16);
1561 uuid->svc_hint = cp->svc_hint;
1562 uuid->size = get_uuid_size(cp->uuid);
1563
1564 list_add_tail(&uuid->list, &hdev->uuids);
1565
1566 hci_req_init(&req, hdev);
1567
1568 update_class(&req);
1569 update_eir(&req);
1570
1571 err = hci_req_run(&req, add_uuid_complete);
1572 if (err < 0) {
1573 if (err != -ENODATA)
1574 goto failed;
1575
1576 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1577 hdev->dev_class, 3);
1578 goto failed;
1579 }
1580
1581 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1582 if (!cmd) {
1583 err = -ENOMEM;
1584 goto failed;
1585 }
1586
1587 err = 0;
1588
1589 failed:
1590 hci_dev_unlock(hdev);
1591 return err;
1592 }
1593
1594 static bool enable_service_cache(struct hci_dev *hdev)
1595 {
1596 if (!hdev_is_powered(hdev))
1597 return false;
1598
1599 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1600 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1601 CACHE_TIMEOUT);
1602 return true;
1603 }
1604
1605 return false;
1606 }
1607
1608 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1609 {
1610 BT_DBG("status 0x%02x", status);
1611
1612 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1613 }
1614
1615 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1616 u16 len)
1617 {
1618 struct mgmt_cp_remove_uuid *cp = data;
1619 struct pending_cmd *cmd;
1620 struct bt_uuid *match, *tmp;
1621 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1622 struct hci_request req;
1623 int err, found;
1624
1625 BT_DBG("request for %s", hdev->name);
1626
1627 hci_dev_lock(hdev);
1628
1629 if (pending_eir_or_class(hdev)) {
1630 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1631 MGMT_STATUS_BUSY);
1632 goto unlock;
1633 }
1634
1635 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1636 err = hci_uuids_clear(hdev);
1637
1638 if (enable_service_cache(hdev)) {
1639 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1640 0, hdev->dev_class, 3);
1641 goto unlock;
1642 }
1643
1644 goto update_class;
1645 }
1646
1647 found = 0;
1648
1649 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1650 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1651 continue;
1652
1653 list_del(&match->list);
1654 kfree(match);
1655 found++;
1656 }
1657
1658 if (found == 0) {
1659 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1660 MGMT_STATUS_INVALID_PARAMS);
1661 goto unlock;
1662 }
1663
1664 update_class:
1665 hci_req_init(&req, hdev);
1666
1667 update_class(&req);
1668 update_eir(&req);
1669
1670 err = hci_req_run(&req, remove_uuid_complete);
1671 if (err < 0) {
1672 if (err != -ENODATA)
1673 goto unlock;
1674
1675 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1676 hdev->dev_class, 3);
1677 goto unlock;
1678 }
1679
1680 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1681 if (!cmd) {
1682 err = -ENOMEM;
1683 goto unlock;
1684 }
1685
1686 err = 0;
1687
1688 unlock:
1689 hci_dev_unlock(hdev);
1690 return err;
1691 }
1692
1693 static void set_class_complete(struct hci_dev *hdev, u8 status)
1694 {
1695 BT_DBG("status 0x%02x", status);
1696
1697 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1698 }
1699
1700 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1701 u16 len)
1702 {
1703 struct mgmt_cp_set_dev_class *cp = data;
1704 struct pending_cmd *cmd;
1705 struct hci_request req;
1706 int err;
1707
1708 BT_DBG("request for %s", hdev->name);
1709
1710 if (!lmp_bredr_capable(hdev))
1711 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1712 MGMT_STATUS_NOT_SUPPORTED);
1713
1714 hci_dev_lock(hdev);
1715
1716 if (pending_eir_or_class(hdev)) {
1717 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1718 MGMT_STATUS_BUSY);
1719 goto unlock;
1720 }
1721
1722 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1723 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1724 MGMT_STATUS_INVALID_PARAMS);
1725 goto unlock;
1726 }
1727
1728 hdev->major_class = cp->major;
1729 hdev->minor_class = cp->minor;
1730
1731 if (!hdev_is_powered(hdev)) {
1732 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1733 hdev->dev_class, 3);
1734 goto unlock;
1735 }
1736
1737 hci_req_init(&req, hdev);
1738
1739 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1740 hci_dev_unlock(hdev);
1741 cancel_delayed_work_sync(&hdev->service_cache);
1742 hci_dev_lock(hdev);
1743 update_eir(&req);
1744 }
1745
1746 update_class(&req);
1747
1748 err = hci_req_run(&req, set_class_complete);
1749 if (err < 0) {
1750 if (err != -ENODATA)
1751 goto unlock;
1752
1753 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1754 hdev->dev_class, 3);
1755 goto unlock;
1756 }
1757
1758 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1759 if (!cmd) {
1760 err = -ENOMEM;
1761 goto unlock;
1762 }
1763
1764 err = 0;
1765
1766 unlock:
1767 hci_dev_unlock(hdev);
1768 return err;
1769 }
1770
1771 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1772 u16 len)
1773 {
1774 struct mgmt_cp_load_link_keys *cp = data;
1775 u16 key_count, expected_len;
1776 int i;
1777
1778 key_count = __le16_to_cpu(cp->key_count);
1779
1780 expected_len = sizeof(*cp) + key_count *
1781 sizeof(struct mgmt_link_key_info);
1782 if (expected_len != len) {
1783 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1784 		       expected_len, len);
1785 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1786 MGMT_STATUS_INVALID_PARAMS);
1787 }
1788
1789 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1790 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1791 MGMT_STATUS_INVALID_PARAMS);
1792
1793 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1794 key_count);
1795
1796 for (i = 0; i < key_count; i++) {
1797 struct mgmt_link_key_info *key = &cp->keys[i];
1798
1799 if (key->addr.type != BDADDR_BREDR)
1800 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1801 MGMT_STATUS_INVALID_PARAMS);
1802 }
1803
1804 hci_dev_lock(hdev);
1805
1806 hci_link_keys_clear(hdev);
1807
1808 if (cp->debug_keys)
1809 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1810 else
1811 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1812
1813 for (i = 0; i < key_count; i++) {
1814 struct mgmt_link_key_info *key = &cp->keys[i];
1815
1816 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1817 key->type, key->pin_len);
1818 }
1819
1820 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1821
1822 hci_dev_unlock(hdev);
1823
1824 return 0;
1825 }
1826
1827 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1828 u8 addr_type, struct sock *skip_sk)
1829 {
1830 struct mgmt_ev_device_unpaired ev;
1831
1832 bacpy(&ev.addr.bdaddr, bdaddr);
1833 ev.addr.type = addr_type;
1834
1835 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1836 skip_sk);
1837 }
1838
1839 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1840 u16 len)
1841 {
1842 struct mgmt_cp_unpair_device *cp = data;
1843 struct mgmt_rp_unpair_device rp;
1844 struct hci_cp_disconnect dc;
1845 struct pending_cmd *cmd;
1846 struct hci_conn *conn;
1847 int err;
1848
1849 memset(&rp, 0, sizeof(rp));
1850 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1851 rp.addr.type = cp->addr.type;
1852
1853 if (!bdaddr_type_is_valid(cp->addr.type))
1854 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1855 MGMT_STATUS_INVALID_PARAMS,
1856 &rp, sizeof(rp));
1857
1858 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1859 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1860 MGMT_STATUS_INVALID_PARAMS,
1861 &rp, sizeof(rp));
1862
1863 hci_dev_lock(hdev);
1864
1865 if (!hdev_is_powered(hdev)) {
1866 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1867 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1868 goto unlock;
1869 }
1870
1871 if (cp->addr.type == BDADDR_BREDR)
1872 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1873 else
1874 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1875
1876 if (err < 0) {
1877 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1878 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1879 goto unlock;
1880 }
1881
1882 if (cp->disconnect) {
1883 if (cp->addr.type == BDADDR_BREDR)
1884 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1885 &cp->addr.bdaddr);
1886 else
1887 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1888 &cp->addr.bdaddr);
1889 } else {
1890 conn = NULL;
1891 }
1892
1893 if (!conn) {
1894 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1895 &rp, sizeof(rp));
1896 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1897 goto unlock;
1898 }
1899
1900 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1901 sizeof(*cp));
1902 if (!cmd) {
1903 err = -ENOMEM;
1904 goto unlock;
1905 }
1906
1907 dc.handle = cpu_to_le16(conn->handle);
1908 dc.reason = 0x13; /* Remote User Terminated Connection */
1909 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1910 if (err < 0)
1911 mgmt_pending_remove(cmd);
1912
1913 unlock:
1914 hci_dev_unlock(hdev);
1915 return err;
1916 }
1917
1918 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1919 u16 len)
1920 {
1921 struct mgmt_cp_disconnect *cp = data;
1922 struct mgmt_rp_disconnect rp;
1923 struct hci_cp_disconnect dc;
1924 struct pending_cmd *cmd;
1925 struct hci_conn *conn;
1926 int err;
1927
1928 BT_DBG("");
1929
1930 memset(&rp, 0, sizeof(rp));
1931 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1932 rp.addr.type = cp->addr.type;
1933
1934 if (!bdaddr_type_is_valid(cp->addr.type))
1935 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1936 MGMT_STATUS_INVALID_PARAMS,
1937 &rp, sizeof(rp));
1938
1939 hci_dev_lock(hdev);
1940
1941 if (!test_bit(HCI_UP, &hdev->flags)) {
1942 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1943 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1944 goto failed;
1945 }
1946
1947 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1948 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1949 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1950 goto failed;
1951 }
1952
1953 if (cp->addr.type == BDADDR_BREDR)
1954 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1955 &cp->addr.bdaddr);
1956 else
1957 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1958
1959 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1960 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1961 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1962 goto failed;
1963 }
1964
1965 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1966 if (!cmd) {
1967 err = -ENOMEM;
1968 goto failed;
1969 }
1970
1971 dc.handle = cpu_to_le16(conn->handle);
1972 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1973
1974 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1975 if (err < 0)
1976 mgmt_pending_remove(cmd);
1977
1978 failed:
1979 hci_dev_unlock(hdev);
1980 return err;
1981 }
1982
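/* Map an HCI link type and address type to the BDADDR_* address type used
 * by the management interface.
 */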
1983 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1984 {
1985 switch (link_type) {
1986 case LE_LINK:
1987 switch (addr_type) {
1988 case ADDR_LE_DEV_PUBLIC:
1989 return BDADDR_LE_PUBLIC;
1990
1991 default:
1992 /* Fallback to LE Random address type */
1993 return BDADDR_LE_RANDOM;
1994 }
1995
1996 default:
1997 /* Fallback to BR/EDR type */
1998 return BDADDR_BREDR;
1999 }
2000 }
2001
2002 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2003 u16 data_len)
2004 {
2005 struct mgmt_rp_get_connections *rp;
2006 struct hci_conn *c;
2007 size_t rp_len;
2008 int err;
2009 u16 i;
2010
2011 BT_DBG("");
2012
2013 hci_dev_lock(hdev);
2014
2015 if (!hdev_is_powered(hdev)) {
2016 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2017 MGMT_STATUS_NOT_POWERED);
2018 goto unlock;
2019 }
2020
2021 i = 0;
2022 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2023 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2024 i++;
2025 }
2026
2027 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2028 rp = kmalloc(rp_len, GFP_KERNEL);
2029 if (!rp) {
2030 err = -ENOMEM;
2031 goto unlock;
2032 }
2033
2034 i = 0;
2035 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2036 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2037 continue;
2038 bacpy(&rp->addr[i].bdaddr, &c->dst);
2039 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2040 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2041 continue;
2042 i++;
2043 }
2044
2045 rp->conn_count = cpu_to_le16(i);
2046
2047 /* Recalculate length in case of filtered SCO connections, etc */
2048 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2049
2050 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2051 rp_len);
2052
2053 kfree(rp);
2054
2055 unlock:
2056 hci_dev_unlock(hdev);
2057 return err;
2058 }
2059
2060 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2061 struct mgmt_cp_pin_code_neg_reply *cp)
2062 {
2063 struct pending_cmd *cmd;
2064 int err;
2065
2066 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2067 sizeof(*cp));
2068 if (!cmd)
2069 return -ENOMEM;
2070
2071 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2072 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2073 if (err < 0)
2074 mgmt_pending_remove(cmd);
2075
2076 return err;
2077 }
2078
2079 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2080 u16 len)
2081 {
2082 struct hci_conn *conn;
2083 struct mgmt_cp_pin_code_reply *cp = data;
2084 struct hci_cp_pin_code_reply reply;
2085 struct pending_cmd *cmd;
2086 int err;
2087
2088 BT_DBG("");
2089
2090 hci_dev_lock(hdev);
2091
2092 if (!hdev_is_powered(hdev)) {
2093 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2094 MGMT_STATUS_NOT_POWERED);
2095 goto failed;
2096 }
2097
2098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2099 if (!conn) {
2100 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2101 MGMT_STATUS_NOT_CONNECTED);
2102 goto failed;
2103 }
2104
2105 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2106 struct mgmt_cp_pin_code_neg_reply ncp;
2107
2108 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2109
2110 BT_ERR("PIN code is not 16 bytes long");
2111
2112 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2113 if (err >= 0)
2114 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2115 MGMT_STATUS_INVALID_PARAMS);
2116
2117 goto failed;
2118 }
2119
2120 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2121 if (!cmd) {
2122 err = -ENOMEM;
2123 goto failed;
2124 }
2125
2126 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2127 reply.pin_len = cp->pin_len;
2128 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2129
2130 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2131 if (err < 0)
2132 mgmt_pending_remove(cmd);
2133
2134 failed:
2135 hci_dev_unlock(hdev);
2136 return err;
2137 }
2138
2139 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2140 u16 len)
2141 {
2142 struct mgmt_cp_set_io_capability *cp = data;
2143
2144 BT_DBG("");
2145
2146 hci_dev_lock(hdev);
2147
2148 hdev->io_capability = cp->io_capability;
2149
2150 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2151 hdev->io_capability);
2152
2153 hci_dev_unlock(hdev);
2154
2155 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2156 0);
2157 }
2158
2159 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2160 {
2161 struct hci_dev *hdev = conn->hdev;
2162 struct pending_cmd *cmd;
2163
2164 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2165 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2166 continue;
2167
2168 if (cmd->user_data != conn)
2169 continue;
2170
2171 return cmd;
2172 }
2173
2174 return NULL;
2175 }
2176
2177 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2178 {
2179 struct mgmt_rp_pair_device rp;
2180 struct hci_conn *conn = cmd->user_data;
2181
2182 bacpy(&rp.addr.bdaddr, &conn->dst);
2183 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2184
2185 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2186 &rp, sizeof(rp));
2187
2188 /* So we don't get further callbacks for this connection */
2189 conn->connect_cfm_cb = NULL;
2190 conn->security_cfm_cb = NULL;
2191 conn->disconn_cfm_cb = NULL;
2192
2193 hci_conn_drop(conn);
2194
2195 mgmt_pending_remove(cmd);
2196 }
2197
2198 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2199 {
2200 struct pending_cmd *cmd;
2201
2202 BT_DBG("status %u", status);
2203
2204 cmd = find_pairing(conn);
2205 if (!cmd)
2206 BT_DBG("Unable to find a pending command");
2207 else
2208 pairing_complete(cmd, mgmt_status(status));
2209 }
2210
2211 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2212 {
2213 struct pending_cmd *cmd;
2214
2215 BT_DBG("status %u", status);
2216
2217 if (!status)
2218 return;
2219
2220 cmd = find_pairing(conn);
2221 if (!cmd)
2222 BT_DBG("Unable to find a pending command");
2223 else
2224 pairing_complete(cmd, mgmt_status(status));
2225 }
2226
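/* Pair_Device: create an ACL or LE connection to the target and hook
 * the connect/security/disconnect confirm callbacks so that
 * pairing_complete() sends the Pair Device response once the link is
 * authenticated (or the attempt fails).
 */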
2227 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2228 u16 len)
2229 {
2230 struct mgmt_cp_pair_device *cp = data;
2231 struct mgmt_rp_pair_device rp;
2232 struct pending_cmd *cmd;
2233 u8 sec_level, auth_type;
2234 struct hci_conn *conn;
2235 int err;
2236
2237 BT_DBG("");
2238
2239 memset(&rp, 0, sizeof(rp));
2240 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2241 rp.addr.type = cp->addr.type;
2242
2243 if (!bdaddr_type_is_valid(cp->addr.type))
2244 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2245 MGMT_STATUS_INVALID_PARAMS,
2246 &rp, sizeof(rp));
2247
2248 hci_dev_lock(hdev);
2249
2250 if (!hdev_is_powered(hdev)) {
2251 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2252 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2253 goto unlock;
2254 }
2255
2256 sec_level = BT_SECURITY_MEDIUM;
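/* An io_cap of 0x03 (NoInputNoOutput) cannot provide MITM protection,
 * so only plain dedicated bonding is requested in that case.
 */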
2257 if (cp->io_cap == 0x03)
2258 auth_type = HCI_AT_DEDICATED_BONDING;
2259 else
2260 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2261
2262 if (cp->addr.type == BDADDR_BREDR)
2263 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2264 cp->addr.type, sec_level, auth_type);
2265 else
2266 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2267 cp->addr.type, sec_level, auth_type);
2268
2269 if (IS_ERR(conn)) {
2270 int status;
2271
2272 if (PTR_ERR(conn) == -EBUSY)
2273 status = MGMT_STATUS_BUSY;
2274 else
2275 status = MGMT_STATUS_CONNECT_FAILED;
2276
2277 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2278 status, &rp,
2279 sizeof(rp));
2280 goto unlock;
2281 }
2282
2283 if (conn->connect_cfm_cb) {
2284 hci_conn_drop(conn);
2285 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2286 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2287 goto unlock;
2288 }
2289
2290 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2291 if (!cmd) {
2292 err = -ENOMEM;
2293 hci_conn_drop(conn);
2294 goto unlock;
2295 }
2296
2297 /* For LE, just connecting is no proof that the pairing has finished */
2298 if (cp->addr.type == BDADDR_BREDR)
2299 conn->connect_cfm_cb = pairing_complete_cb;
2300 else
2301 conn->connect_cfm_cb = le_connect_complete_cb;
2302
2303 conn->security_cfm_cb = pairing_complete_cb;
2304 conn->disconn_cfm_cb = pairing_complete_cb;
2305 conn->io_capability = cp->io_cap;
2306 cmd->user_data = conn;
2307
2308 if (conn->state == BT_CONNECTED &&
2309 hci_conn_security(conn, sec_level, auth_type))
2310 pairing_complete(cmd, 0);
2311
2312 err = 0;
2313
2314 unlock:
2315 hci_dev_unlock(hdev);
2316 return err;
2317 }
2318
2319 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2320 u16 len)
2321 {
2322 struct mgmt_addr_info *addr = data;
2323 struct pending_cmd *cmd;
2324 struct hci_conn *conn;
2325 int err;
2326
2327 BT_DBG("");
2328
2329 hci_dev_lock(hdev);
2330
2331 if (!hdev_is_powered(hdev)) {
2332 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2333 MGMT_STATUS_NOT_POWERED);
2334 goto unlock;
2335 }
2336
2337 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2338 if (!cmd) {
2339 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2340 MGMT_STATUS_INVALID_PARAMS);
2341 goto unlock;
2342 }
2343
2344 conn = cmd->user_data;
2345
2346 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2347 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2348 MGMT_STATUS_INVALID_PARAMS);
2349 goto unlock;
2350 }
2351
2352 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2353
2354 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2355 addr, sizeof(*addr));
2356 unlock:
2357 hci_dev_unlock(hdev);
2358 return err;
2359 }
2360
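/* Common helper for the user confirm/passkey (and PIN negative) reply
 * commands: LE addresses are handed to SMP directly, while BR/EDR
 * replies go out as the given HCI command with a pending mgmt command
 * tracking completion.
 */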
2361 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2362 struct mgmt_addr_info *addr, u16 mgmt_op,
2363 u16 hci_op, __le32 passkey)
2364 {
2365 struct pending_cmd *cmd;
2366 struct hci_conn *conn;
2367 int err;
2368
2369 hci_dev_lock(hdev);
2370
2371 if (!hdev_is_powered(hdev)) {
2372 err = cmd_complete(sk, hdev->id, mgmt_op,
2373 MGMT_STATUS_NOT_POWERED, addr,
2374 sizeof(*addr));
2375 goto done;
2376 }
2377
2378 if (addr->type == BDADDR_BREDR)
2379 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2380 else
2381 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2382
2383 if (!conn) {
2384 err = cmd_complete(sk, hdev->id, mgmt_op,
2385 MGMT_STATUS_NOT_CONNECTED, addr,
2386 sizeof(*addr));
2387 goto done;
2388 }
2389
2390 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2391 /* Continue with pairing via SMP */
2392 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2393
2394 if (!err)
2395 err = cmd_complete(sk, hdev->id, mgmt_op,
2396 MGMT_STATUS_SUCCESS, addr,
2397 sizeof(*addr));
2398 else
2399 err = cmd_complete(sk, hdev->id, mgmt_op,
2400 MGMT_STATUS_FAILED, addr,
2401 sizeof(*addr));
2402
2403 goto done;
2404 }
2405
2406 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2407 if (!cmd) {
2408 err = -ENOMEM;
2409 goto done;
2410 }
2411
2412 /* Continue with pairing via HCI */
2413 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2414 struct hci_cp_user_passkey_reply cp;
2415
2416 bacpy(&cp.bdaddr, &addr->bdaddr);
2417 cp.passkey = passkey;
2418 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2419 } else
2420 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2421 &addr->bdaddr);
2422
2423 if (err < 0)
2424 mgmt_pending_remove(cmd);
2425
2426 done:
2427 hci_dev_unlock(hdev);
2428 return err;
2429 }
2430
2431 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2432 void *data, u16 len)
2433 {
2434 struct mgmt_cp_pin_code_neg_reply *cp = data;
2435
2436 BT_DBG("");
2437
2438 return user_pairing_resp(sk, hdev, &cp->addr,
2439 MGMT_OP_PIN_CODE_NEG_REPLY,
2440 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2441 }
2442
2443 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2444 u16 len)
2445 {
2446 struct mgmt_cp_user_confirm_reply *cp = data;
2447
2448 BT_DBG("");
2449
2450 if (len != sizeof(*cp))
2451 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2452 MGMT_STATUS_INVALID_PARAMS);
2453
2454 return user_pairing_resp(sk, hdev, &cp->addr,
2455 MGMT_OP_USER_CONFIRM_REPLY,
2456 HCI_OP_USER_CONFIRM_REPLY, 0);
2457 }
2458
2459 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2460 void *data, u16 len)
2461 {
2462 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2463
2464 BT_DBG("");
2465
2466 return user_pairing_resp(sk, hdev, &cp->addr,
2467 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2468 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2469 }
2470
2471 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2472 u16 len)
2473 {
2474 struct mgmt_cp_user_passkey_reply *cp = data;
2475
2476 BT_DBG("");
2477
2478 return user_pairing_resp(sk, hdev, &cp->addr,
2479 MGMT_OP_USER_PASSKEY_REPLY,
2480 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2481 }
2482
2483 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2484 void *data, u16 len)
2485 {
2486 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2487
2488 BT_DBG("");
2489
2490 return user_pairing_resp(sk, hdev, &cp->addr,
2491 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2492 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2493 }
2494
2495 static void update_name(struct hci_request *req)
2496 {
2497 struct hci_dev *hdev = req->hdev;
2498 struct hci_cp_write_local_name cp;
2499
2500 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2501
2502 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2503 }
2504
2505 static void set_name_complete(struct hci_dev *hdev, u8 status)
2506 {
2507 struct mgmt_cp_set_local_name *cp;
2508 struct pending_cmd *cmd;
2509
2510 BT_DBG("status 0x%02x", status);
2511
2512 hci_dev_lock(hdev);
2513
2514 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2515 if (!cmd)
2516 goto unlock;
2517
2518 cp = cmd->param;
2519
2520 if (status)
2521 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2522 mgmt_status(status));
2523 else
2524 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2525 cp, sizeof(*cp));
2526
2527 mgmt_pending_remove(cmd);
2528
2529 unlock:
2530 hci_dev_unlock(hdev);
2531 }
2532
2533 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2534 u16 len)
2535 {
2536 struct mgmt_cp_set_local_name *cp = data;
2537 struct pending_cmd *cmd;
2538 struct hci_request req;
2539 int err;
2540
2541 BT_DBG("");
2542
2543 hci_dev_lock(hdev);
2544
2545 /* If the old values are the same as the new ones just return a
2546 * direct command complete event.
2547 */
2548 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2549 !memcmp(hdev->short_name, cp->short_name,
2550 sizeof(hdev->short_name))) {
2551 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2552 data, len);
2553 goto failed;
2554 }
2555
2556 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2557
2558 if (!hdev_is_powered(hdev)) {
2559 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2560
2561 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2562 data, len);
2563 if (err < 0)
2564 goto failed;
2565
2566 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2567 sk);
2568
2569 goto failed;
2570 }
2571
2572 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2573 if (!cmd) {
2574 err = -ENOMEM;
2575 goto failed;
2576 }
2577
2578 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2579
2580 hci_req_init(&req, hdev);
2581
2582 if (lmp_bredr_capable(hdev)) {
2583 update_name(&req);
2584 update_eir(&req);
2585 }
2586
2587 if (lmp_le_capable(hdev))
2588 hci_update_ad(&req);
2589
2590 err = hci_req_run(&req, set_name_complete);
2591 if (err < 0)
2592 mgmt_pending_remove(cmd);
2593
2594 failed:
2595 hci_dev_unlock(hdev);
2596 return err;
2597 }
2598
2599 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2600 void *data, u16 data_len)
2601 {
2602 struct pending_cmd *cmd;
2603 int err;
2604
2605 BT_DBG("%s", hdev->name);
2606
2607 hci_dev_lock(hdev);
2608
2609 if (!hdev_is_powered(hdev)) {
2610 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2611 MGMT_STATUS_NOT_POWERED);
2612 goto unlock;
2613 }
2614
2615 if (!lmp_ssp_capable(hdev)) {
2616 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2617 MGMT_STATUS_NOT_SUPPORTED);
2618 goto unlock;
2619 }
2620
2621 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2622 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2623 MGMT_STATUS_BUSY);
2624 goto unlock;
2625 }
2626
2627 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2628 if (!cmd) {
2629 err = -ENOMEM;
2630 goto unlock;
2631 }
2632
2633 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2634 if (err < 0)
2635 mgmt_pending_remove(cmd);
2636
2637 unlock:
2638 hci_dev_unlock(hdev);
2639 return err;
2640 }
2641
2642 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2643 void *data, u16 len)
2644 {
2645 struct mgmt_cp_add_remote_oob_data *cp = data;
2646 u8 status;
2647 int err;
2648
2649 BT_DBG("%s", hdev->name);
2650
2651 hci_dev_lock(hdev);
2652
2653 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2654 cp->randomizer);
2655 if (err < 0)
2656 status = MGMT_STATUS_FAILED;
2657 else
2658 status = MGMT_STATUS_SUCCESS;
2659
2660 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2661 &cp->addr, sizeof(cp->addr));
2662
2663 hci_dev_unlock(hdev);
2664 return err;
2665 }
2666
2667 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2668 void *data, u16 len)
2669 {
2670 struct mgmt_cp_remove_remote_oob_data *cp = data;
2671 u8 status;
2672 int err;
2673
2674 BT_DBG("%s", hdev->name);
2675
2676 hci_dev_lock(hdev);
2677
2678 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2679 if (err < 0)
2680 status = MGMT_STATUS_INVALID_PARAMS;
2681 else
2682 status = MGMT_STATUS_SUCCESS;
2683
2684 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2685 status, &cp->addr, sizeof(cp->addr));
2686
2687 hci_dev_unlock(hdev);
2688 return err;
2689 }
2690
2691 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2692 {
2693 struct pending_cmd *cmd;
2694 u8 type;
2695 int err;
2696
2697 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2698
2699 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2700 if (!cmd)
2701 return -ENOENT;
2702
2703 type = hdev->discovery.type;
2704
2705 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2706 &type, sizeof(type));
2707 mgmt_pending_remove(cmd);
2708
2709 return err;
2710 }
2711
2712 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2713 {
2714 BT_DBG("status %d", status);
2715
2716 if (status) {
2717 hci_dev_lock(hdev);
2718 mgmt_start_discovery_failed(hdev, status);
2719 hci_dev_unlock(hdev);
2720 return;
2721 }
2722
2723 hci_dev_lock(hdev);
2724 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2725 hci_dev_unlock(hdev);
2726
2727 switch (hdev->discovery.type) {
2728 case DISCOV_TYPE_LE:
2729 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2730 DISCOV_LE_TIMEOUT);
2731 break;
2732
2733 case DISCOV_TYPE_INTERLEAVED:
2734 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2735 DISCOV_INTERLEAVED_TIMEOUT);
2736 break;
2737
2738 case DISCOV_TYPE_BREDR:
2739 break;
2740
2741 default:
2742 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
2743 }
2744 }
2745
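/* Start_Discovery: BR/EDR discovery issues an HCI inquiry with the
 * GIAC, while LE and interleaved discovery program and enable an
 * active LE scan; start_discovery_complete() then arms the
 * le_scan_disable timer for the LE-based types.
 */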
2746 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2747 void *data, u16 len)
2748 {
2749 struct mgmt_cp_start_discovery *cp = data;
2750 struct pending_cmd *cmd;
2751 struct hci_cp_le_set_scan_param param_cp;
2752 struct hci_cp_le_set_scan_enable enable_cp;
2753 struct hci_cp_inquiry inq_cp;
2754 struct hci_request req;
2755 /* General inquiry access code (GIAC) */
2756 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2757 int err;
2758
2759 BT_DBG("%s", hdev->name);
2760
2761 hci_dev_lock(hdev);
2762
2763 if (!hdev_is_powered(hdev)) {
2764 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2765 MGMT_STATUS_NOT_POWERED);
2766 goto failed;
2767 }
2768
2769 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2770 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2771 MGMT_STATUS_BUSY);
2772 goto failed;
2773 }
2774
2775 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2776 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2777 MGMT_STATUS_BUSY);
2778 goto failed;
2779 }
2780
2781 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2782 if (!cmd) {
2783 err = -ENOMEM;
2784 goto failed;
2785 }
2786
2787 hdev->discovery.type = cp->type;
2788
2789 hci_req_init(&req, hdev);
2790
2791 switch (hdev->discovery.type) {
2792 case DISCOV_TYPE_BREDR:
2793 if (!lmp_bredr_capable(hdev)) {
2794 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2795 MGMT_STATUS_NOT_SUPPORTED);
2796 mgmt_pending_remove(cmd);
2797 goto failed;
2798 }
2799
2800 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2801 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2802 MGMT_STATUS_BUSY);
2803 mgmt_pending_remove(cmd);
2804 goto failed;
2805 }
2806
2807 hci_inquiry_cache_flush(hdev);
2808
2809 memset(&inq_cp, 0, sizeof(inq_cp));
2810 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2811 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2812 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2813 break;
2814
2815 case DISCOV_TYPE_LE:
2816 case DISCOV_TYPE_INTERLEAVED:
2817 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2818 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2819 MGMT_STATUS_NOT_SUPPORTED);
2820 mgmt_pending_remove(cmd);
2821 goto failed;
2822 }
2823
2824 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2825 !lmp_bredr_capable(hdev)) {
2826 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2827 MGMT_STATUS_NOT_SUPPORTED);
2828 mgmt_pending_remove(cmd);
2829 goto failed;
2830 }
2831
2832 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2833 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2834 MGMT_STATUS_REJECTED);
2835 mgmt_pending_remove(cmd);
2836 goto failed;
2837 }
2838
2839 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2840 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2841 MGMT_STATUS_BUSY);
2842 mgmt_pending_remove(cmd);
2843 goto failed;
2844 }
2845
2846 memset(&param_cp, 0, sizeof(param_cp));
2847 param_cp.type = LE_SCAN_ACTIVE;
2848 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2849 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2850 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2851 &param_cp);
2852
2853 memset(&enable_cp, 0, sizeof(enable_cp));
2854 enable_cp.enable = LE_SCAN_ENABLE;
2855 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2856 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2857 &enable_cp);
2858 break;
2859
2860 default:
2861 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2862 MGMT_STATUS_INVALID_PARAMS);
2863 mgmt_pending_remove(cmd);
2864 goto failed;
2865 }
2866
2867 err = hci_req_run(&req, start_discovery_complete);
2868 if (err < 0)
2869 mgmt_pending_remove(cmd);
2870 else
2871 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2872
2873 failed:
2874 hci_dev_unlock(hdev);
2875 return err;
2876 }
2877
2878 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2879 {
2880 struct pending_cmd *cmd;
2881 int err;
2882
2883 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2884 if (!cmd)
2885 return -ENOENT;
2886
2887 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2888 &hdev->discovery.type, sizeof(hdev->discovery.type));
2889 mgmt_pending_remove(cmd);
2890
2891 return err;
2892 }
2893
2894 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2895 {
2896 BT_DBG("status %d", status);
2897
2898 hci_dev_lock(hdev);
2899
2900 if (status) {
2901 mgmt_stop_discovery_failed(hdev, status);
2902 goto unlock;
2903 }
2904
2905 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2906
2907 unlock:
2908 hci_dev_unlock(hdev);
2909 }
2910
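/* Stop_Discovery: in the FINDING state either the inquiry is
 * cancelled or the LE scan is disabled; in the RESOLVING state the
 * outstanding remote name request is cancelled instead.
 */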
2911 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2912 u16 len)
2913 {
2914 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2915 struct pending_cmd *cmd;
2916 struct hci_cp_remote_name_req_cancel cp;
2917 struct inquiry_entry *e;
2918 struct hci_request req;
2919 struct hci_cp_le_set_scan_enable enable_cp;
2920 int err;
2921
2922 BT_DBG("%s", hdev->name);
2923
2924 hci_dev_lock(hdev);
2925
2926 if (!hci_discovery_active(hdev)) {
2927 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2928 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2929 sizeof(mgmt_cp->type));
2930 goto unlock;
2931 }
2932
2933 if (hdev->discovery.type != mgmt_cp->type) {
2934 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2935 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2936 sizeof(mgmt_cp->type));
2937 goto unlock;
2938 }
2939
2940 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2941 if (!cmd) {
2942 err = -ENOMEM;
2943 goto unlock;
2944 }
2945
2946 hci_req_init(&req, hdev);
2947
2948 switch (hdev->discovery.state) {
2949 case DISCOVERY_FINDING:
2950 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2951 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2952 } else {
2953 cancel_delayed_work(&hdev->le_scan_disable);
2954
2955 memset(&enable_cp, 0, sizeof(enable_cp));
2956 enable_cp.enable = LE_SCAN_DISABLE;
2957 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2958 sizeof(enable_cp), &enable_cp);
2959 }
2960
2961 break;
2962
2963 case DISCOVERY_RESOLVING:
2964 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2965 NAME_PENDING);
2966 if (!e) {
2967 mgmt_pending_remove(cmd);
2968 err = cmd_complete(sk, hdev->id,
2969 MGMT_OP_STOP_DISCOVERY, 0,
2970 &mgmt_cp->type,
2971 sizeof(mgmt_cp->type));
2972 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2973 goto unlock;
2974 }
2975
2976 bacpy(&cp.bdaddr, &e->data.bdaddr);
2977 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2978 &cp);
2979
2980 break;
2981
2982 default:
2983 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2984
2985 mgmt_pending_remove(cmd);
2986 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2987 MGMT_STATUS_FAILED, &mgmt_cp->type,
2988 sizeof(mgmt_cp->type));
2989 goto unlock;
2990 }
2991
2992 err = hci_req_run(&req, stop_discovery_complete);
2993 if (err < 0)
2994 mgmt_pending_remove(cmd);
2995 else
2996 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2997
2998 unlock:
2999 hci_dev_unlock(hdev);
3000 return err;
3001 }
3002
3003 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3004 u16 len)
3005 {
3006 struct mgmt_cp_confirm_name *cp = data;
3007 struct inquiry_entry *e;
3008 int err;
3009
3010 BT_DBG("%s", hdev->name);
3011
3012 hci_dev_lock(hdev);
3013
3014 if (!hci_discovery_active(hdev)) {
3015 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3016 MGMT_STATUS_FAILED);
3017 goto failed;
3018 }
3019
3020 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3021 if (!e) {
3022 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3023 MGMT_STATUS_INVALID_PARAMS);
3024 goto failed;
3025 }
3026
3027 if (cp->name_known) {
3028 e->name_state = NAME_KNOWN;
3029 list_del(&e->list);
3030 } else {
3031 e->name_state = NAME_NEEDED;
3032 hci_inquiry_cache_update_resolve(hdev, e);
3033 }
3034
3035 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3036 sizeof(cp->addr));
3037
3038 failed:
3039 hci_dev_unlock(hdev);
3040 return err;
3041 }
3042
3043 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3044 u16 len)
3045 {
3046 struct mgmt_cp_block_device *cp = data;
3047 u8 status;
3048 int err;
3049
3050 BT_DBG("%s", hdev->name);
3051
3052 if (!bdaddr_type_is_valid(cp->addr.type))
3053 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3054 MGMT_STATUS_INVALID_PARAMS,
3055 &cp->addr, sizeof(cp->addr));
3056
3057 hci_dev_lock(hdev);
3058
3059 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3060 if (err < 0)
3061 status = MGMT_STATUS_FAILED;
3062 else
3063 status = MGMT_STATUS_SUCCESS;
3064
3065 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3066 &cp->addr, sizeof(cp->addr));
3067
3068 hci_dev_unlock(hdev);
3069
3070 return err;
3071 }
3072
3073 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3074 u16 len)
3075 {
3076 struct mgmt_cp_unblock_device *cp = data;
3077 u8 status;
3078 int err;
3079
3080 BT_DBG("%s", hdev->name);
3081
3082 if (!bdaddr_type_is_valid(cp->addr.type))
3083 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3084 MGMT_STATUS_INVALID_PARAMS,
3085 &cp->addr, sizeof(cp->addr));
3086
3087 hci_dev_lock(hdev);
3088
3089 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3090 if (err < 0)
3091 status = MGMT_STATUS_INVALID_PARAMS;
3092 else
3093 status = MGMT_STATUS_SUCCESS;
3094
3095 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3096 &cp->addr, sizeof(cp->addr));
3097
3098 hci_dev_unlock(hdev);
3099
3100 return err;
3101 }
3102
3103 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3104 u16 len)
3105 {
3106 struct mgmt_cp_set_device_id *cp = data;
3107 struct hci_request req;
3108 int err;
3109 __u16 source;
3110
3111 BT_DBG("%s", hdev->name);
3112
3113 source = __le16_to_cpu(cp->source);
3114
3115 if (source > 0x0002)
3116 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3117 MGMT_STATUS_INVALID_PARAMS);
3118
3119 hci_dev_lock(hdev);
3120
3121 hdev->devid_source = source;
3122 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3123 hdev->devid_product = __le16_to_cpu(cp->product);
3124 hdev->devid_version = __le16_to_cpu(cp->version);
3125
3126 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3127
3128 hci_req_init(&req, hdev);
3129 update_eir(&req);
3130 hci_req_run(&req, NULL);
3131
3132 hci_dev_unlock(hdev);
3133
3134 return err;
3135 }
3136
3137 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3138 {
3139 struct cmd_lookup match = { NULL, hdev };
3140
3141 if (status) {
3142 u8 mgmt_err = mgmt_status(status);
3143
3144 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3145 cmd_status_rsp, &mgmt_err);
3146 return;
3147 }
3148
3149 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3150 &match);
3151
3152 new_settings(hdev, match.sk);
3153
3154 if (match.sk)
3155 sock_put(match.sk);
3156 }
3157
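/* Set_Advertising: when the controller is powered off (or the value
 * is unchanged) only the HCI_LE_PERIPHERAL flag is toggled; otherwise
 * HCI_OP_LE_SET_ADV_ENABLE is sent and the reply is generated from
 * set_advertising_complete().
 */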
3158 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
3159 {
3160 struct mgmt_mode *cp = data;
3161 struct pending_cmd *cmd;
3162 struct hci_request req;
3163 u8 val, enabled;
3164 int err;
3165
3166 BT_DBG("request for %s", hdev->name);
3167
3168 if (!lmp_le_capable(hdev))
3169 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3170 MGMT_STATUS_NOT_SUPPORTED);
3171
3172 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
3173 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3174 MGMT_STATUS_REJECTED);
3175
3176 if (cp->val != 0x00 && cp->val != 0x01)
3177 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3178 MGMT_STATUS_INVALID_PARAMS);
3179
3180 hci_dev_lock(hdev);
3181
3182 val = !!cp->val;
3183 enabled = test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
3184
3185 if (!hdev_is_powered(hdev) || val == enabled) {
3186 bool changed = false;
3187
3188 if (val != test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3189 change_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
3190 changed = true;
3191 }
3192
3193 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3194 if (err < 0)
3195 goto unlock;
3196
3197 if (changed)
3198 err = new_settings(hdev, sk);
3199
3200 goto unlock;
3201 }
3202
3203 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3204 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3205 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3206 MGMT_STATUS_BUSY);
3207 goto unlock;
3208 }
3209
3210 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3211 if (!cmd) {
3212 err = -ENOMEM;
3213 goto unlock;
3214 }
3215
3216 hci_req_init(&req, hdev);
3217
3218 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(val), &val);
3219
3220 err = hci_req_run(&req, set_advertising_complete);
3221 if (err < 0)
3222 mgmt_pending_remove(cmd);
3223
3224 unlock:
3225 hci_dev_unlock(hdev);
3226 return err;
3227 }
3228
3229 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3230 {
3231 struct pending_cmd *cmd;
3232
3233 BT_DBG("status 0x%02x", status);
3234
3235 hci_dev_lock(hdev);
3236
3237 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3238 if (!cmd)
3239 goto unlock;
3240
3241 if (status) {
3242 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3243 mgmt_status(status));
3244 } else {
3245 struct mgmt_mode *cp = cmd->param;
3246
3247 if (cp->val)
3248 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3249 else
3250 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3251
3252 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3253 new_settings(hdev, cmd->sk);
3254 }
3255
3256 mgmt_pending_remove(cmd);
3257
3258 unlock:
3259 hci_dev_unlock(hdev);
3260 }
3261
3262 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3263 void *data, u16 len)
3264 {
3265 struct mgmt_mode *cp = data;
3266 struct pending_cmd *cmd;
3267 struct hci_request req;
3268 int err;
3269
3270 BT_DBG("%s", hdev->name);
3271
3272 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3273 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3274 MGMT_STATUS_NOT_SUPPORTED);
3275
3276 if (cp->val != 0x00 && cp->val != 0x01)
3277 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3278 MGMT_STATUS_INVALID_PARAMS);
3279
3280 if (!hdev_is_powered(hdev))
3281 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3282 MGMT_STATUS_NOT_POWERED);
3283
3284 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3285 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3286 MGMT_STATUS_REJECTED);
3287
3288 hci_dev_lock(hdev);
3289
3290 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3291 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3292 MGMT_STATUS_BUSY);
3293 goto unlock;
3294 }
3295
3296 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3297 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3298 hdev);
3299 goto unlock;
3300 }
3301
3302 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3303 data, len);
3304 if (!cmd) {
3305 err = -ENOMEM;
3306 goto unlock;
3307 }
3308
3309 hci_req_init(&req, hdev);
3310
3311 write_fast_connectable(&req, cp->val);
3312
3313 err = hci_req_run(&req, fast_connectable_complete);
3314 if (err < 0) {
3315 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3316 MGMT_STATUS_FAILED);
3317 mgmt_pending_remove(cmd);
3318 }
3319
3320 unlock:
3321 hci_dev_unlock(hdev);
3322
3323 return err;
3324 }
3325
3326 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3327 {
3328 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3329 return false;
3330 if (key->master != 0x00 && key->master != 0x01)
3331 return false;
3332 if (!bdaddr_type_is_le(key->addr.type))
3333 return false;
3334 return true;
3335 }
3336
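/* Load_Long_Term_Keys: the payload must be exactly the header plus
 * key_count mgmt_ltk_info entries; after validating every key the
 * existing SMP LTK store is cleared and repopulated.
 */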
3337 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3338 void *cp_data, u16 len)
3339 {
3340 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3341 u16 key_count, expected_len;
3342 int i, err;
3343
3344 key_count = __le16_to_cpu(cp->key_count);
3345
3346 expected_len = sizeof(*cp) + key_count *
3347 sizeof(struct mgmt_ltk_info);
3348 if (expected_len != len) {
2349 BT_ERR("load_keys: expected %u bytes, got %u bytes",
2350 expected_len, len);
3351 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3352 MGMT_STATUS_INVALID_PARAMS);
3353 }
3354
3355 BT_DBG("%s key_count %u", hdev->name, key_count);
3356
3357 for (i = 0; i < key_count; i++) {
3358 struct mgmt_ltk_info *key = &cp->keys[i];
3359
3360 if (!ltk_is_valid(key))
3361 return cmd_status(sk, hdev->id,
3362 MGMT_OP_LOAD_LONG_TERM_KEYS,
3363 MGMT_STATUS_INVALID_PARAMS);
3364 }
3365
3366 hci_dev_lock(hdev);
3367
3368 hci_smp_ltks_clear(hdev);
3369
3370 for (i = 0; i < key_count; i++) {
3371 struct mgmt_ltk_info *key = &cp->keys[i];
3372 u8 type;
3373
3374 if (key->master)
3375 type = HCI_SMP_LTK;
3376 else
3377 type = HCI_SMP_LTK_SLAVE;
3378
3379 hci_add_ltk(hdev, &key->addr.bdaddr,
3380 bdaddr_to_le(key->addr.type),
3381 type, 0, key->authenticated, key->val,
3382 key->enc_size, key->ediv, key->rand);
3383 }
3384
3385 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3386 NULL, 0);
3387
3388 hci_dev_unlock(hdev);
3389
3390 return err;
3391 }
3392
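/* Handler table indexed by mgmt opcode. For var_len commands data_len
 * is the minimum payload size; for fixed-size commands the payload
 * must match it exactly (enforced in mgmt_control() below).
 */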
3393 static const struct mgmt_handler {
3394 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3395 u16 data_len);
3396 bool var_len;
3397 size_t data_len;
3398 } mgmt_handlers[] = {
3399 { NULL }, /* 0x0000 (no command) */
3400 { read_version, false, MGMT_READ_VERSION_SIZE },
3401 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3402 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3403 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3404 { set_powered, false, MGMT_SETTING_SIZE },
3405 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3406 { set_connectable, false, MGMT_SETTING_SIZE },
3407 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3408 { set_pairable, false, MGMT_SETTING_SIZE },
3409 { set_link_security, false, MGMT_SETTING_SIZE },
3410 { set_ssp, false, MGMT_SETTING_SIZE },
3411 { set_hs, false, MGMT_SETTING_SIZE },
3412 { set_le, false, MGMT_SETTING_SIZE },
3413 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3414 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3415 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3416 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3417 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3418 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3419 { disconnect, false, MGMT_DISCONNECT_SIZE },
3420 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3421 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3422 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3423 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3424 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3425 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3426 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3427 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3428 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3429 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3430 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3431 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3432 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3433 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3434 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3435 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3436 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3437 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3438 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3439 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3440 { set_advertising, false, MGMT_SETTING_SIZE },
3441 };
3442
3443
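/* Entry point for control messages written to an HCI mgmt socket.
 * Each message is a little-endian mgmt_hdr (opcode, controller index,
 * parameter length) followed by the command parameters; an index of
 * MGMT_INDEX_NONE selects the controller-less commands below
 * MGMT_OP_READ_INFO.
 */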
3444 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3445 {
3446 void *buf;
3447 u8 *cp;
3448 struct mgmt_hdr *hdr;
3449 u16 opcode, index, len;
3450 struct hci_dev *hdev = NULL;
3451 const struct mgmt_handler *handler;
3452 int err;
3453
3454 BT_DBG("got %zu bytes", msglen);
3455
3456 if (msglen < sizeof(*hdr))
3457 return -EINVAL;
3458
3459 buf = kmalloc(msglen, GFP_KERNEL);
3460 if (!buf)
3461 return -ENOMEM;
3462
3463 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3464 err = -EFAULT;
3465 goto done;
3466 }
3467
3468 hdr = buf;
3469 opcode = __le16_to_cpu(hdr->opcode);
3470 index = __le16_to_cpu(hdr->index);
3471 len = __le16_to_cpu(hdr->len);
3472
3473 if (len != msglen - sizeof(*hdr)) {
3474 err = -EINVAL;
3475 goto done;
3476 }
3477
3478 if (index != MGMT_INDEX_NONE) {
3479 hdev = hci_dev_get(index);
3480 if (!hdev) {
3481 err = cmd_status(sk, index, opcode,
3482 MGMT_STATUS_INVALID_INDEX);
3483 goto done;
3484 }
3485
3486 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3487 err = cmd_status(sk, index, opcode,
3488 MGMT_STATUS_INVALID_INDEX);
3489 goto done;
3490 }
3491 }
3492
3493 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3494 mgmt_handlers[opcode].func == NULL) {
3495 BT_DBG("Unknown op %u", opcode);
3496 err = cmd_status(sk, index, opcode,
3497 MGMT_STATUS_UNKNOWN_COMMAND);
3498 goto done;
3499 }
3500
3501 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3502 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3503 err = cmd_status(sk, index, opcode,
3504 MGMT_STATUS_INVALID_INDEX);
3505 goto done;
3506 }
3507
3508 handler = &mgmt_handlers[opcode];
3509
3510 if ((handler->var_len && len < handler->data_len) ||
3511 (!handler->var_len && len != handler->data_len)) {
3512 err = cmd_status(sk, index, opcode,
3513 MGMT_STATUS_INVALID_PARAMS);
3514 goto done;
3515 }
3516
3517 if (hdev)
3518 mgmt_init_hdev(sk, hdev);
3519
3520 cp = buf + sizeof(*hdr);
3521
3522 err = handler->func(sk, hdev, cp, len);
3523 if (err < 0)
3524 goto done;
3525
3526 err = msglen;
3527
3528 done:
3529 if (hdev)
3530 hci_dev_put(hdev);
3531
3532 kfree(buf);
3533 return err;
3534 }
3535
3536 int mgmt_index_added(struct hci_dev *hdev)
3537 {
3538 if (!mgmt_valid_hdev(hdev))
3539 return -ENOTSUPP;
3540
3541 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3542 }
3543
3544 int mgmt_index_removed(struct hci_dev *hdev)
3545 {
3546 u8 status = MGMT_STATUS_INVALID_INDEX;
3547
3548 if (!mgmt_valid_hdev(hdev))
3549 return -ENOTSUPP;
3550
3551 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3552
3553 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3554 }
3555
3556 static void set_bredr_scan(struct hci_request *req)
3557 {
3558 struct hci_dev *hdev = req->hdev;
3559 u8 scan = 0;
3560
3561 /* Ensure that fast connectable is disabled. This function will
3562 * not do anything if the page scan parameters are already what
3563 * they should be.
3564 */
3565 write_fast_connectable(req, false);
3566
3567 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3568 scan |= SCAN_PAGE;
3569 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3570 scan |= SCAN_INQUIRY;
3571
3572 if (scan)
3573 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3574 }
3575
3576 static void powered_complete(struct hci_dev *hdev, u8 status)
3577 {
3578 struct cmd_lookup match = { NULL, hdev };
3579
3580 BT_DBG("status 0x%02x", status);
3581
3582 hci_dev_lock(hdev);
3583
3584 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3585
3586 new_settings(hdev, match.sk);
3587
3588 hci_dev_unlock(hdev);
3589
3590 if (match.sk)
3591 sock_put(match.sk);
3592 }
3593
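/* Bring the controller in line with the stored mgmt settings when it
 * powers on: SSP mode, LE host support, advertising, link-level
 * authentication, and (for BR/EDR) scan mode, class, name and EIR.
 */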
3594 static int powered_update_hci(struct hci_dev *hdev)
3595 {
3596 struct hci_request req;
3597 u8 link_sec;
3598
3599 hci_req_init(&req, hdev);
3600
3601 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3602 !lmp_host_ssp_capable(hdev)) {
3603 u8 ssp = 1;
3604
3605 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3606 }
3607
3608 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3609 lmp_bredr_capable(hdev)) {
3610 struct hci_cp_write_le_host_supported cp;
3611
3612 cp.le = 1;
3613 cp.simul = lmp_le_br_capable(hdev);
3614
3615 /* Check first if we already have the right
3616 * host state (host features set)
3617 */
3618 if (cp.le != lmp_host_le_capable(hdev) ||
3619 cp.simul != lmp_host_le_br_capable(hdev))
3620 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3621 sizeof(cp), &cp);
3622 }
3623
3624 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
3625 u8 adv = 0x01;
3626
3627 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(adv), &adv);
3628 }
3629
3630 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3631 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3632 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3633 sizeof(link_sec), &link_sec);
3634
3635 if (lmp_bredr_capable(hdev)) {
3636 set_bredr_scan(&req);
3637 update_class(&req);
3638 update_name(&req);
3639 update_eir(&req);
3640 }
3641
3642 return hci_req_run(&req, powered_complete);
3643 }
3644
3645 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3646 {
3647 struct cmd_lookup match = { NULL, hdev };
3648 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3649 u8 zero_cod[] = { 0, 0, 0 };
3650 int err;
3651
3652 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3653 return 0;
3654
3655 if (powered) {
3656 if (powered_update_hci(hdev) == 0)
3657 return 0;
3658
3659 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3660 &match);
3661 goto new_settings;
3662 }
3663
3664 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3665 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3666
3667 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3668 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3669 zero_cod, sizeof(zero_cod), NULL);
3670
3671 new_settings:
3672 err = new_settings(hdev, match.sk);
3673
3674 if (match.sk)
3675 sock_put(match.sk);
3676
3677 return err;
3678 }
3679
3680 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3681 {
3682 struct pending_cmd *cmd;
3683 u8 status;
3684
3685 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3686 if (!cmd)
3687 return -ENOENT;
3688
3689 if (err == -ERFKILL)
3690 status = MGMT_STATUS_RFKILLED;
3691 else
3692 status = MGMT_STATUS_FAILED;
3693
3694 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3695
3696 mgmt_pending_remove(cmd);
3697
3698 return err;
3699 }
3700
3701 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3702 {
3703 struct cmd_lookup match = { NULL, hdev };
3704 bool changed = false;
3705 int err = 0;
3706
3707 if (discoverable) {
3708 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3709 changed = true;
3710 } else {
3711 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3712 changed = true;
3713 }
3714
3715 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3716 &match);
3717
3718 if (changed)
3719 err = new_settings(hdev, match.sk);
3720
3721 if (match.sk)
3722 sock_put(match.sk);
3723
3724 return err;
3725 }
3726
3727 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3728 {
3729 struct pending_cmd *cmd;
3730 bool changed = false;
3731 int err = 0;
3732
3733 if (connectable) {
3734 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3735 changed = true;
3736 } else {
3737 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3738 changed = true;
3739 }
3740
3741 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3742
3743 if (changed)
3744 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3745
3746 return err;
3747 }
3748
3749 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3750 {
3751 u8 mgmt_err = mgmt_status(status);
3752
3753 if (scan & SCAN_PAGE)
3754 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3755 cmd_status_rsp, &mgmt_err);
3756
3757 if (scan & SCAN_INQUIRY)
3758 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3759 cmd_status_rsp, &mgmt_err);
3760
3761 return 0;
3762 }
3763
3764 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3765 bool persistent)
3766 {
3767 struct mgmt_ev_new_link_key ev;
3768
3769 memset(&ev, 0, sizeof(ev));
3770
3771 ev.store_hint = persistent;
3772 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3773 ev.key.addr.type = BDADDR_BREDR;
3774 ev.key.type = key->type;
3775 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3776 ev.key.pin_len = key->pin_len;
3777
3778 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3779 }
3780
3781 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3782 {
3783 struct mgmt_ev_new_long_term_key ev;
3784
3785 memset(&ev, 0, sizeof(ev));
3786
3787 ev.store_hint = persistent;
3788 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3789 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3790 ev.key.authenticated = key->authenticated;
3791 ev.key.enc_size = key->enc_size;
3792 ev.key.ediv = key->ediv;
3793
3794 if (key->type == HCI_SMP_LTK)
3795 ev.key.master = 1;
3796
3797 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3798 memcpy(ev.key.val, key->val, sizeof(key->val));
3799
3800 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3801 NULL);
3802 }
3803
3804 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3805 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3806 u8 *dev_class)
3807 {
3808 char buf[512];
3809 struct mgmt_ev_device_connected *ev = (void *) buf;
3810 u16 eir_len = 0;
3811
3812 bacpy(&ev->addr.bdaddr, bdaddr);
3813 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3814
3815 ev->flags = __cpu_to_le32(flags);
3816
3817 if (name_len > 0)
3818 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3819 name, name_len);
3820
3821 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3822 eir_len = eir_append_data(ev->eir, eir_len,
3823 EIR_CLASS_OF_DEV, dev_class, 3);
3824
3825 ev->eir_len = cpu_to_le16(eir_len);
3826
3827 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3828 sizeof(*ev) + eir_len, NULL);
3829 }
3830
3831 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3832 {
3833 struct mgmt_cp_disconnect *cp = cmd->param;
3834 struct sock **sk = data;
3835 struct mgmt_rp_disconnect rp;
3836
3837 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3838 rp.addr.type = cp->addr.type;
3839
3840 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3841 sizeof(rp));
3842
3843 *sk = cmd->sk;
3844 sock_hold(*sk);
3845
3846 mgmt_pending_remove(cmd);
3847 }
3848
3849 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3850 {
3851 struct hci_dev *hdev = data;
3852 struct mgmt_cp_unpair_device *cp = cmd->param;
3853 struct mgmt_rp_unpair_device rp;
3854
3855 memset(&rp, 0, sizeof(rp));
3856 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3857 rp.addr.type = cp->addr.type;
3858
3859 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3860
3861 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3862
3863 mgmt_pending_remove(cmd);
3864 }
3865
3866 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3867 u8 link_type, u8 addr_type, u8 reason)
3868 {
3869 struct mgmt_ev_device_disconnected ev;
3870 struct sock *sk = NULL;
3871 int err;
3872
3873 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3874
3875 bacpy(&ev.addr.bdaddr, bdaddr);
3876 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3877 ev.reason = reason;
3878
3879 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3880 sk);
3881
3882 if (sk)
3883 sock_put(sk);
3884
3885 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3886 hdev);
3887
3888 return err;
3889 }
3890
3891 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3892 u8 link_type, u8 addr_type, u8 status)
3893 {
3894 struct mgmt_rp_disconnect rp;
3895 struct pending_cmd *cmd;
3896 int err;
3897
3898 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3899 hdev);
3900
3901 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3902 if (!cmd)
3903 return -ENOENT;
3904
3905 bacpy(&rp.addr.bdaddr, bdaddr);
3906 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3907
3908 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3909 mgmt_status(status), &rp, sizeof(rp));
3910
3911 mgmt_pending_remove(cmd);
3912
3913 return err;
3914 }
3915
3916 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3917 u8 addr_type, u8 status)
3918 {
3919 struct mgmt_ev_connect_failed ev;
3920
3921 bacpy(&ev.addr.bdaddr, bdaddr);
3922 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3923 ev.status = mgmt_status(status);
3924
3925 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3926 }
3927
3928 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3929 {
3930 struct mgmt_ev_pin_code_request ev;
3931
3932 bacpy(&ev.addr.bdaddr, bdaddr);
3933 ev.addr.type = BDADDR_BREDR;
3934 ev.secure = secure;
3935
3936 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3937 NULL);
3938 }
3939
3940 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3941 u8 status)
3942 {
3943 struct pending_cmd *cmd;
3944 struct mgmt_rp_pin_code_reply rp;
3945 int err;
3946
3947 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3948 if (!cmd)
3949 return -ENOENT;
3950
3951 bacpy(&rp.addr.bdaddr, bdaddr);
3952 rp.addr.type = BDADDR_BREDR;
3953
3954 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3955 mgmt_status(status), &rp, sizeof(rp));
3956
3957 mgmt_pending_remove(cmd);
3958
3959 return err;
3960 }
3961
3962 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3963 u8 status)
3964 {
3965 struct pending_cmd *cmd;
3966 struct mgmt_rp_pin_code_reply rp;
3967 int err;
3968
3969 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3970 if (!cmd)
3971 return -ENOENT;
3972
3973 bacpy(&rp.addr.bdaddr, bdaddr);
3974 rp.addr.type = BDADDR_BREDR;
3975
3976 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3977 mgmt_status(status), &rp, sizeof(rp));
3978
3979 mgmt_pending_remove(cmd);
3980
3981 return err;
3982 }
3983
3984 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3985 u8 link_type, u8 addr_type, __le32 value,
3986 u8 confirm_hint)
3987 {
3988 struct mgmt_ev_user_confirm_request ev;
3989
3990 BT_DBG("%s", hdev->name);
3991
3992 bacpy(&ev.addr.bdaddr, bdaddr);
3993 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3994 ev.confirm_hint = confirm_hint;
3995 ev.value = value;
3996
3997 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3998 NULL);
3999 }
4000
4001 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
4002 u8 link_type, u8 addr_type)
4003 {
4004 struct mgmt_ev_user_passkey_request ev;
4005
4006 BT_DBG("%s", hdev->name);
4007
4008 bacpy(&ev.addr.bdaddr, bdaddr);
4009 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4010
4011 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
4012 NULL);
4013 }
4014
4015 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4016 u8 link_type, u8 addr_type, u8 status,
4017 u8 opcode)
4018 {
4019 struct pending_cmd *cmd;
4020 struct mgmt_rp_user_confirm_reply rp;
4021 int err;
4022
4023 cmd = mgmt_pending_find(opcode, hdev);
4024 if (!cmd)
4025 return -ENOENT;
4026
4027 bacpy(&rp.addr.bdaddr, bdaddr);
4028 rp.addr.type = link_to_bdaddr(link_type, addr_type);
4029 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
4030 &rp, sizeof(rp));
4031
4032 mgmt_pending_remove(cmd);
4033
4034 return err;
4035 }
4036
4037 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4038 u8 link_type, u8 addr_type, u8 status)
4039 {
4040 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4041 status, MGMT_OP_USER_CONFIRM_REPLY);
4042 }
4043
4044 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4045 u8 link_type, u8 addr_type, u8 status)
4046 {
4047 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4048 status,
4049 MGMT_OP_USER_CONFIRM_NEG_REPLY);
4050 }
4051
4052 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4053 u8 link_type, u8 addr_type, u8 status)
4054 {
4055 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4056 status, MGMT_OP_USER_PASSKEY_REPLY);
4057 }
4058
4059 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
4060 u8 link_type, u8 addr_type, u8 status)
4061 {
4062 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
4063 status,
4064 MGMT_OP_USER_PASSKEY_NEG_REPLY);
4065 }
4066
4067 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
4068 u8 link_type, u8 addr_type, u32 passkey,
4069 u8 entered)
4070 {
4071 struct mgmt_ev_passkey_notify ev;
4072
4073 BT_DBG("%s", hdev->name);
4074
4075 bacpy(&ev.addr.bdaddr, bdaddr);
4076 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4077 ev.passkey = __cpu_to_le32(passkey);
4078 ev.entered = entered;
4079
4080 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
4081 }
4082
4083 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4084 u8 addr_type, u8 status)
4085 {
4086 struct mgmt_ev_auth_failed ev;
4087
4088 bacpy(&ev.addr.bdaddr, bdaddr);
4089 ev.addr.type = link_to_bdaddr(link_type, addr_type);
4090 ev.status = mgmt_status(status);
4091
4092 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
4093 }
4094
4095 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
4096 {
4097 struct cmd_lookup match = { NULL, hdev };
4098 bool changed = false;
4099 int err = 0;
4100
4101 if (status) {
4102 u8 mgmt_err = mgmt_status(status);
4103 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
4104 cmd_status_rsp, &mgmt_err);
4105 return 0;
4106 }
4107
4108 if (test_bit(HCI_AUTH, &hdev->flags)) {
4109 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4110 changed = true;
4111 } else {
4112 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
4113 changed = true;
4114 }
4115
4116 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
4117 &match);
4118
4119 if (changed)
4120 err = new_settings(hdev, match.sk);
4121
4122 if (match.sk)
4123 sock_put(match.sk);
4124
4125 return err;
4126 }
4127
4128 static void clear_eir(struct hci_request *req)
4129 {
4130 struct hci_dev *hdev = req->hdev;
4131 struct hci_cp_write_eir cp;
4132
4133 if (!lmp_ext_inq_capable(hdev))
4134 return;
4135
4136 memset(hdev->eir, 0, sizeof(hdev->eir));
4137
4138 memset(&cp, 0, sizeof(cp));
4139
4140 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4141 }
4142
4143 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4144 {
4145 struct cmd_lookup match = { NULL, hdev };
4146 struct hci_request req;
4147 bool changed = false;
4148 int err = 0;
4149
4150 if (status) {
4151 u8 mgmt_err = mgmt_status(status);
4152
4153 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4154 &hdev->dev_flags))
4155 err = new_settings(hdev, NULL);
4156
4157 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4158 &mgmt_err);
4159
4160 return err;
4161 }
4162
4163 if (enable) {
4164 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4165 changed = true;
4166 } else {
4167 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4168 changed = true;
4169 }
4170
4171 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4172
4173 if (changed)
4174 err = new_settings(hdev, match.sk);
4175
4176 if (match.sk)
4177 sock_put(match.sk);
4178
4179 hci_req_init(&req, hdev);
4180
4181 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4182 update_eir(&req);
4183 else
4184 clear_eir(&req);
4185
4186 hci_req_run(&req, NULL);
4187
4188 return err;
4189 }
4190
4191 static void sk_lookup(struct pending_cmd *cmd, void *data)
4192 {
4193 struct cmd_lookup *match = data;
4194
4195 if (match->sk == NULL) {
4196 match->sk = cmd->sk;
4197 sock_hold(match->sk);
4198 }
4199 }
4200
4201 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4202 u8 status)
4203 {
4204 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4205 int err = 0;
4206
4207 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4208 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4209 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4210
4211 if (!status)
4212 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4213 3, NULL);
4214
4215 if (match.sk)
4216 sock_put(match.sk);
4217
4218 return err;
4219 }
4220
4221 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4222 {
4223 struct mgmt_cp_set_local_name ev;
4224 struct pending_cmd *cmd;
4225
4226 if (status)
4227 return 0;
4228
4229 memset(&ev, 0, sizeof(ev));
4230 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4231 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4232
4233 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4234 if (!cmd) {
4235 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4236
4237 /* If this is an HCI command related to powering on the
4238 * HCI dev, don't send any mgmt signals.
4239 */
4240 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4241 return 0;
4242 }
4243
4244 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4245 cmd ? cmd->sk : NULL);
4246 }
4247
4248 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4249 u8 *randomizer, u8 status)
4250 {
4251 struct pending_cmd *cmd;
4252 int err;
4253
4254 BT_DBG("%s status %u", hdev->name, status);
4255
4256 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4257 if (!cmd)
4258 return -ENOENT;
4259
4260 if (status) {
4261 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4262 mgmt_status(status));
4263 } else {
4264 struct mgmt_rp_read_local_oob_data rp;
4265
4266 memcpy(rp.hash, hash, sizeof(rp.hash));
4267 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4268
4269 err = cmd_complete(cmd->sk, hdev->id,
4270 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4271 sizeof(rp));
4272 }
4273
4274 mgmt_pending_remove(cmd);
4275
4276 return err;
4277 }
4278
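/* Report a device found during discovery, appending the Class of
 * Device as an EIR field when the controller did not include one.
 * Reports are dropped while no discovery is active.
 */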
4279 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4280 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
4281 u8 ssp, u8 *eir, u16 eir_len)
4282 {
4283 char buf[512];
4284 struct mgmt_ev_device_found *ev = (void *) buf;
4285 size_t ev_size;
4286
4287 if (!hci_discovery_active(hdev))
4288 return -EPERM;
4289
4290 /* Leave 5 bytes for a potential CoD field */
4291 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4292 return -EINVAL;
4293
4294 memset(buf, 0, sizeof(buf));
4295
4296 bacpy(&ev->addr.bdaddr, bdaddr);
4297 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4298 ev->rssi = rssi;
4299 if (cfm_name)
4300 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4301 if (!ssp)
4302 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4303
4304 if (eir_len > 0)
4305 memcpy(ev->eir, eir, eir_len);
4306
4307 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4308 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4309 dev_class, 3);
4310
4311 ev->eir_len = cpu_to_le16(eir_len);
4312 ev_size = sizeof(*ev) + eir_len;
4313
4314 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4315 }
4316
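/* Deliver a resolved remote name as a Device Found event carrying a
 * single complete-name EIR field.
 */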
4317 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4318 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4319 {
4320 struct mgmt_ev_device_found *ev;
4321 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4322 u16 eir_len;
4323
4324 ev = (struct mgmt_ev_device_found *) buf;
4325
4326 memset(buf, 0, sizeof(buf));
4327
4328 bacpy(&ev->addr.bdaddr, bdaddr);
4329 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4330 ev->rssi = rssi;
4331
4332 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4333 name_len);
4334
4335 ev->eir_len = cpu_to_le16(eir_len);
4336
4337 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4338 sizeof(*ev) + eir_len, NULL);
4339 }
4340
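/* Discovery state changed: complete any pending Start/Stop Discovery
 * command and broadcast the new state in a Discovering event.
 */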
4341 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4342 {
4343 struct mgmt_ev_discovering ev;
4344 struct pending_cmd *cmd;
4345
4346 BT_DBG("%s discovering %u", hdev->name, discovering);
4347
4348 if (discovering)
4349 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4350 else
4351 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4352
4353 if (cmd != NULL) {
4354 u8 type = hdev->discovery.type;
4355
4356 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4357 sizeof(type));
4358 mgmt_pending_remove(cmd);
4359 }
4360
4361 memset(&ev, 0, sizeof(ev));
4362 ev.type = hdev->discovery.type;
4363 ev.discovering = discovering;
4364
4365 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4366 }
4367
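/* Emit a Device Blocked event, skipping the socket that issued the
 * Block Device command.
 */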
4368 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4369 {
4370 struct pending_cmd *cmd;
4371 struct mgmt_ev_device_blocked ev;
4372
4373 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4374
4375 bacpy(&ev.addr.bdaddr, bdaddr);
4376 ev.addr.type = type;
4377
4378 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4379 cmd ? cmd->sk : NULL);
4380 }
4381
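/* Emit a Device Unblocked event, skipping the socket that issued the
 * Unblock Device command.
 */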
4382 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4383 {
4384 struct pending_cmd *cmd;
4385 struct mgmt_ev_device_unblocked ev;
4386
4387 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4388
4389 bacpy(&ev.addr.bdaddr, bdaddr);
4390 ev.addr.type = type;
4391
4392 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4393 cmd ? cmd->sk : NULL);
4394 }