1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
64
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI debugfs entries ---- */
76
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
83 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91 {
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
123 kfree_skb(skb);
124
125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
126
127 return count;
128 }
129
130 static const struct file_operations dut_mode_fops = {
131 .open = simple_open,
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
135 };
136
137 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
138 size_t count, loff_t *ppos)
139 {
140 struct hci_dev *hdev = file->private_data;
141 char buf[3];
142
143 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
144 buf[1] = '\n';
145 buf[2] = '\0';
146 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
147 }
148
149 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
150 size_t count, loff_t *ppos)
151 {
152 struct hci_dev *hdev = file->private_data;
153 char buf[32];
154 size_t buf_size = min(count, (sizeof(buf)-1));
155 bool enable;
156 int err;
157
158 if (copy_from_user(buf, user_buf, buf_size))
159 return -EFAULT;
160
161 buf[buf_size] = '\0';
162 if (strtobool(buf, &enable))
163 return -EINVAL;
164
165 /* When the diagnostic flags are not persistent and the transport
166 * is not active, then there is no need for the vendor callback.
167 *
168 * Instead just store the desired value. If needed the setting
169 * will be programmed when the controller gets powered on.
170 */
171 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
172 !test_bit(HCI_RUNNING, &hdev->flags))
173 goto done;
174
175 hci_req_lock(hdev);
176 err = hdev->set_diag(hdev, enable);
177 hci_req_unlock(hdev);
178
179 if (err < 0)
180 return err;
181
182 done:
183 if (enable)
184 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
185 else
186 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
187
188 return count;
189 }
190
191 static const struct file_operations vendor_diag_fops = {
192 .open = simple_open,
193 .read = vendor_diag_read,
194 .write = vendor_diag_write,
195 .llseek = default_llseek,
196 };
197
198 static void hci_debugfs_create_basic(struct hci_dev *hdev)
199 {
200 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
201 &dut_mode_fops);
202
203 if (hdev->set_diag)
204 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
205 &vendor_diag_fops);
206 }
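
/* Example (illustrative only, not part of the original source): the two
 * entries created above live under the per-controller debugfs directory.
 * Assuming debugfs is mounted at /sys/kernel/debug and the controller is
 * hci0, they can be exercised from userspace roughly as:
 *
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   # cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * The write handlers parse the input with strtobool(), so "Y"/"N" and
 * "1"/"0" are both accepted.
 */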
207
208 /* ---- HCI requests ---- */
209
210 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
211 struct sk_buff *skb)
212 {
213 BT_DBG("%s result 0x%2.2x", hdev->name, result);
214
215 if (hdev->req_status == HCI_REQ_PEND) {
216 hdev->req_result = result;
217 hdev->req_status = HCI_REQ_DONE;
218 if (skb)
219 hdev->req_skb = skb_get(skb);
220 wake_up_interruptible(&hdev->req_wait_q);
221 }
222 }
223
224 static void hci_req_cancel(struct hci_dev *hdev, int err)
225 {
226 BT_DBG("%s err 0x%2.2x", hdev->name, err);
227
228 if (hdev->req_status == HCI_REQ_PEND) {
229 hdev->req_result = err;
230 hdev->req_status = HCI_REQ_CANCELED;
231 wake_up_interruptible(&hdev->req_wait_q);
232 }
233 }
234
235 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
236 const void *param, u8 event, u32 timeout)
237 {
238 DECLARE_WAITQUEUE(wait, current);
239 struct hci_request req;
240 struct sk_buff *skb;
241 int err = 0;
242
243 BT_DBG("%s", hdev->name);
244
245 hci_req_init(&req, hdev);
246
247 hci_req_add_ev(&req, opcode, plen, param, event);
248
249 hdev->req_status = HCI_REQ_PEND;
250
251 add_wait_queue(&hdev->req_wait_q, &wait);
252 set_current_state(TASK_INTERRUPTIBLE);
253
254 err = hci_req_run_skb(&req, hci_req_sync_complete);
255 if (err < 0) {
256 remove_wait_queue(&hdev->req_wait_q, &wait);
257 set_current_state(TASK_RUNNING);
258 return ERR_PTR(err);
259 }
260
261 schedule_timeout(timeout);
262
263 remove_wait_queue(&hdev->req_wait_q, &wait);
264
265 if (signal_pending(current))
266 return ERR_PTR(-EINTR);
267
268 switch (hdev->req_status) {
269 case HCI_REQ_DONE:
270 err = -bt_to_errno(hdev->req_result);
271 break;
272
273 case HCI_REQ_CANCELED:
274 err = -hdev->req_result;
275 break;
276
277 default:
278 err = -ETIMEDOUT;
279 break;
280 }
281
282 hdev->req_status = hdev->req_result = 0;
283 skb = hdev->req_skb;
284 hdev->req_skb = NULL;
285
286 BT_DBG("%s end: err %d", hdev->name, err);
287
288 if (err < 0) {
289 kfree_skb(skb);
290 return ERR_PTR(err);
291 }
292
293 if (!skb)
294 return ERR_PTR(-ENODATA);
295
296 return skb;
297 }
298 EXPORT_SYMBOL(__hci_cmd_sync_ev);
299
300 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
301 const void *param, u32 timeout)
302 {
303 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
304 }
305 EXPORT_SYMBOL(__hci_cmd_sync);
306
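/* Example (illustrative sketch, not part of the original source): a driver
 * or mgmt handler can issue a synchronous HCI command and wait for its
 * completion with __hci_cmd_sync(). The opcode below (0xfc01) is a
 * hypothetical vendor command used purely for illustration:
 *
 *    struct sk_buff *skb;
 *
 *    hci_req_lock(hdev);
 *    skb = __hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_CMD_TIMEOUT);
 *    hci_req_unlock(hdev);
 *    if (IS_ERR(skb))
 *        return PTR_ERR(skb);
 *    kfree_skb(skb);
 *
 * The returned skb carries the command's response parameters and must be
 * freed by the caller, as dut_mode_write() above does.
 */
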
307 /* Execute request and wait for completion. */
308 static int __hci_req_sync(struct hci_dev *hdev,
309 void (*func)(struct hci_request *req,
310 unsigned long opt),
311 unsigned long opt, __u32 timeout)
312 {
313 struct hci_request req;
314 DECLARE_WAITQUEUE(wait, current);
315 int err = 0;
316
317 BT_DBG("%s start", hdev->name);
318
319 hci_req_init(&req, hdev);
320
321 hdev->req_status = HCI_REQ_PEND;
322
323 func(&req, opt);
324
325 add_wait_queue(&hdev->req_wait_q, &wait);
326 set_current_state(TASK_INTERRUPTIBLE);
327
328 err = hci_req_run_skb(&req, hci_req_sync_complete);
329 if (err < 0) {
330 hdev->req_status = 0;
331
332 remove_wait_queue(&hdev->req_wait_q, &wait);
333 set_current_state(TASK_RUNNING);
334
335 /* ENODATA means the HCI request command queue is empty.
336 * This can happen when a request with conditionals doesn't
337 * trigger any commands to be sent. This is normal behavior
338 * and should not trigger an error return.
339 */
340 if (err == -ENODATA)
341 return 0;
342
343 return err;
344 }
345
346 schedule_timeout(timeout);
347
348 remove_wait_queue(&hdev->req_wait_q, &wait);
349
350 if (signal_pending(current))
351 return -EINTR;
352
353 switch (hdev->req_status) {
354 case HCI_REQ_DONE:
355 err = -bt_to_errno(hdev->req_result);
356 break;
357
358 case HCI_REQ_CANCELED:
359 err = -hdev->req_result;
360 break;
361
362 default:
363 err = -ETIMEDOUT;
364 break;
365 }
366
367 hdev->req_status = hdev->req_result = 0;
368
369 BT_DBG("%s end: err %d", hdev->name, err);
370
371 return err;
372 }
373
374 static int hci_req_sync(struct hci_dev *hdev,
375 void (*req)(struct hci_request *req,
376 unsigned long opt),
377 unsigned long opt, __u32 timeout)
378 {
379 int ret;
380
381 if (!test_bit(HCI_UP, &hdev->flags))
382 return -ENETDOWN;
383
384 /* Serialize all requests */
385 hci_req_lock(hdev);
386 ret = __hci_req_sync(hdev, req, opt, timeout);
387 hci_req_unlock(hdev);
388
389 return ret;
390 }
391
392 static void hci_reset_req(struct hci_request *req, unsigned long opt)
393 {
394 BT_DBG("%s %ld", req->hdev->name, opt);
395
396 /* Reset device */
397 set_bit(HCI_RESET, &req->hdev->flags);
398 hci_req_add(req, HCI_OP_RESET, 0, NULL);
399 }
400
401 static void bredr_init(struct hci_request *req)
402 {
403 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
404
405 /* Read Local Supported Features */
406 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
407
408 /* Read Local Version */
409 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
410
411 /* Read BD Address */
412 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
413 }
414
415 static void amp_init1(struct hci_request *req)
416 {
417 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
418
419 /* Read Local Version */
420 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
421
422 /* Read Local Supported Commands */
423 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
424
425 /* Read Local AMP Info */
426 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
427
428 /* Read Data Blk size */
429 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
430
431 /* Read Flow Control Mode */
432 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
433
434 /* Read Location Data */
435 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
436 }
437
438 static void amp_init2(struct hci_request *req)
439 {
440 /* Read Local Supported Features. Not all AMP controllers
441 * support this so it's placed conditionally in the second
442 * stage init.
443 */
444 if (req->hdev->commands[14] & 0x20)
445 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
446 }
447
448 static void hci_init1_req(struct hci_request *req, unsigned long opt)
449 {
450 struct hci_dev *hdev = req->hdev;
451
452 BT_DBG("%s %ld", hdev->name, opt);
453
454 /* Reset */
455 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
456 hci_reset_req(req, 0);
457
458 switch (hdev->dev_type) {
459 case HCI_BREDR:
460 bredr_init(req);
461 break;
462
463 case HCI_AMP:
464 amp_init1(req);
465 break;
466
467 default:
468 BT_ERR("Unknown device type %d", hdev->dev_type);
469 break;
470 }
471 }
472
473 static void bredr_setup(struct hci_request *req)
474 {
475 __le16 param;
476 __u8 flt_type;
477
478 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
479 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
480
481 /* Read Class of Device */
482 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
483
484 /* Read Local Name */
485 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
486
487 /* Read Voice Setting */
488 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
489
490 /* Read Number of Supported IAC */
491 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
492
493 /* Read Current IAC LAP */
494 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
495
496 /* Clear Event Filters */
497 flt_type = HCI_FLT_CLEAR_ALL;
498 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
499
500 /* Connection accept timeout ~20 secs */
501 param = cpu_to_le16(0x7d00);
502 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
503 }
504
505 static void le_setup(struct hci_request *req)
506 {
507 struct hci_dev *hdev = req->hdev;
508
509 /* Read LE Buffer Size */
510 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
511
512 /* Read LE Local Supported Features */
513 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
514
515 /* Read LE Supported States */
516 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
517
518 /* Read LE White List Size */
519 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
520
521 /* Clear LE White List */
522 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
523
524 /* LE-only controllers have LE implicitly enabled */
525 if (!lmp_bredr_capable(hdev))
526 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
527 }
528
529 static void hci_setup_event_mask(struct hci_request *req)
530 {
531 struct hci_dev *hdev = req->hdev;
532
533 /* The second byte is 0xff instead of 0x9f (two reserved bits
534 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
535 * command otherwise.
536 */
537 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
538
539 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
540 * any event mask for pre 1.2 devices.
541 */
542 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
543 return;
544
545 if (lmp_bredr_capable(hdev)) {
546 events[4] |= 0x01; /* Flow Specification Complete */
547 events[4] |= 0x02; /* Inquiry Result with RSSI */
548 events[4] |= 0x04; /* Read Remote Extended Features Complete */
549 events[5] |= 0x08; /* Synchronous Connection Complete */
550 events[5] |= 0x10; /* Synchronous Connection Changed */
551 } else {
552 /* Use a different default for LE-only devices */
553 memset(events, 0, sizeof(events));
554 events[0] |= 0x10; /* Disconnection Complete */
555 events[1] |= 0x08; /* Read Remote Version Information Complete */
556 events[1] |= 0x20; /* Command Complete */
557 events[1] |= 0x40; /* Command Status */
558 events[1] |= 0x80; /* Hardware Error */
559 events[2] |= 0x04; /* Number of Completed Packets */
560 events[3] |= 0x02; /* Data Buffer Overflow */
561
562 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
563 events[0] |= 0x80; /* Encryption Change */
564 events[5] |= 0x80; /* Encryption Key Refresh Complete */
565 }
566 }
567
568 if (lmp_inq_rssi_capable(hdev))
569 events[4] |= 0x02; /* Inquiry Result with RSSI */
570
571 if (lmp_sniffsubr_capable(hdev))
572 events[5] |= 0x20; /* Sniff Subrating */
573
574 if (lmp_pause_enc_capable(hdev))
575 events[5] |= 0x80; /* Encryption Key Refresh Complete */
576
577 if (lmp_ext_inq_capable(hdev))
578 events[5] |= 0x40; /* Extended Inquiry Result */
579
580 if (lmp_no_flush_capable(hdev))
581 events[7] |= 0x01; /* Enhanced Flush Complete */
582
583 if (lmp_lsto_capable(hdev))
584 events[6] |= 0x80; /* Link Supervision Timeout Changed */
585
586 if (lmp_ssp_capable(hdev)) {
587 events[6] |= 0x01; /* IO Capability Request */
588 events[6] |= 0x02; /* IO Capability Response */
589 events[6] |= 0x04; /* User Confirmation Request */
590 events[6] |= 0x08; /* User Passkey Request */
591 events[6] |= 0x10; /* Remote OOB Data Request */
592 events[6] |= 0x20; /* Simple Pairing Complete */
593 events[7] |= 0x04; /* User Passkey Notification */
594 events[7] |= 0x08; /* Keypress Notification */
595 events[7] |= 0x10; /* Remote Host Supported
596 * Features Notification
597 */
598 }
599
600 if (lmp_le_capable(hdev))
601 events[7] |= 0x20; /* LE Meta-Event */
602
603 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
604 }
605
606 static void hci_init2_req(struct hci_request *req, unsigned long opt)
607 {
608 struct hci_dev *hdev = req->hdev;
609
610 if (hdev->dev_type == HCI_AMP)
611 return amp_init2(req);
612
613 if (lmp_bredr_capable(hdev))
614 bredr_setup(req);
615 else
616 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
617
618 if (lmp_le_capable(hdev))
619 le_setup(req);
620
621 /* All Bluetooth 1.2 and later controllers should support the
622 * HCI command for reading the local supported commands.
623 *
624 * Unfortunately some controllers indicate Bluetooth 1.2 support,
625 * but do not have support for this command. If that is the case,
626 * the driver can quirk the behavior and skip reading the local
627 * supported commands.
628 */
629 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
630 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
631 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
632
633 if (lmp_ssp_capable(hdev)) {
634 		/* When SSP is available, the host features page should
635 		 * also be available. However, some controllers report
636 		 * max_page as 0 as long as SSP has not been enabled. To
637 		 * achieve proper debugging output, force max_page to a
638 		 * minimum of 1.
639 */
640 hdev->max_page = 0x01;
641
642 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
643 u8 mode = 0x01;
644
645 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
646 sizeof(mode), &mode);
647 } else {
648 struct hci_cp_write_eir cp;
649
650 memset(hdev->eir, 0, sizeof(hdev->eir));
651 memset(&cp, 0, sizeof(cp));
652
653 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
654 }
655 }
656
657 if (lmp_inq_rssi_capable(hdev) ||
658 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
659 u8 mode;
660
661 /* If Extended Inquiry Result events are supported, then
662 * they are clearly preferred over Inquiry Result with RSSI
663 * events.
664 */
665 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
666
667 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
668 }
669
670 if (lmp_inq_tx_pwr_capable(hdev))
671 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
672
673 if (lmp_ext_feat_capable(hdev)) {
674 struct hci_cp_read_local_ext_features cp;
675
676 cp.page = 0x01;
677 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
678 sizeof(cp), &cp);
679 }
680
681 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
682 u8 enable = 1;
683 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
684 &enable);
685 }
686 }
687
688 static void hci_setup_link_policy(struct hci_request *req)
689 {
690 struct hci_dev *hdev = req->hdev;
691 struct hci_cp_write_def_link_policy cp;
692 u16 link_policy = 0;
693
694 if (lmp_rswitch_capable(hdev))
695 link_policy |= HCI_LP_RSWITCH;
696 if (lmp_hold_capable(hdev))
697 link_policy |= HCI_LP_HOLD;
698 if (lmp_sniff_capable(hdev))
699 link_policy |= HCI_LP_SNIFF;
700 if (lmp_park_capable(hdev))
701 link_policy |= HCI_LP_PARK;
702
703 cp.policy = cpu_to_le16(link_policy);
704 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
705 }
706
707 static void hci_set_le_support(struct hci_request *req)
708 {
709 struct hci_dev *hdev = req->hdev;
710 struct hci_cp_write_le_host_supported cp;
711
712 /* LE-only devices do not support explicit enablement */
713 if (!lmp_bredr_capable(hdev))
714 return;
715
716 memset(&cp, 0, sizeof(cp));
717
718 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
719 cp.le = 0x01;
720 cp.simul = 0x00;
721 }
722
723 if (cp.le != lmp_host_le_capable(hdev))
724 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
725 &cp);
726 }
727
728 static void hci_set_event_mask_page_2(struct hci_request *req)
729 {
730 struct hci_dev *hdev = req->hdev;
731 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
732
733 	/* If Connectionless Slave Broadcast master role is supported,
734 * enable all necessary events for it.
735 */
736 if (lmp_csb_master_capable(hdev)) {
737 events[1] |= 0x40; /* Triggered Clock Capture */
738 events[1] |= 0x80; /* Synchronization Train Complete */
739 events[2] |= 0x10; /* Slave Page Response Timeout */
740 events[2] |= 0x20; /* CSB Channel Map Change */
741 }
742
743 	/* If Connectionless Slave Broadcast slave role is supported,
744 * enable all necessary events for it.
745 */
746 if (lmp_csb_slave_capable(hdev)) {
747 events[2] |= 0x01; /* Synchronization Train Received */
748 events[2] |= 0x02; /* CSB Receive */
749 events[2] |= 0x04; /* CSB Timeout */
750 events[2] |= 0x08; /* Truncated Page Complete */
751 }
752
753 /* Enable Authenticated Payload Timeout Expired event if supported */
754 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
755 events[2] |= 0x80;
756
757 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
758 }
759
760 static void hci_init3_req(struct hci_request *req, unsigned long opt)
761 {
762 struct hci_dev *hdev = req->hdev;
763 u8 p;
764
765 hci_setup_event_mask(req);
766
767 if (hdev->commands[6] & 0x20 &&
768 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
769 struct hci_cp_read_stored_link_key cp;
770
771 bacpy(&cp.bdaddr, BDADDR_ANY);
772 cp.read_all = 0x01;
773 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
774 }
775
776 if (hdev->commands[5] & 0x10)
777 hci_setup_link_policy(req);
778
779 if (hdev->commands[8] & 0x01)
780 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
781
782 /* Some older Broadcom based Bluetooth 1.2 controllers do not
783 * support the Read Page Scan Type command. Check support for
784 * this command in the bit mask of supported commands.
785 */
786 if (hdev->commands[13] & 0x01)
787 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
788
789 if (lmp_le_capable(hdev)) {
790 u8 events[8];
791
792 memset(events, 0, sizeof(events));
793 events[0] = 0x0f;
794
795 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
796 events[0] |= 0x10; /* LE Long Term Key Request */
797
798 /* If controller supports the Connection Parameters Request
799 * Link Layer Procedure, enable the corresponding event.
800 */
801 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
802 events[0] |= 0x20; /* LE Remote Connection
803 * Parameter Request
804 */
805
806 /* If the controller supports the Data Length Extension
807 * feature, enable the corresponding event.
808 */
809 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
810 events[0] |= 0x40; /* LE Data Length Change */
811
812 /* If the controller supports Extended Scanner Filter
813 		 * Policies, enable the corresponding event.
814 */
815 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
816 events[1] |= 0x04; /* LE Direct Advertising
817 * Report
818 */
819
820 /* If the controller supports the LE Read Local P-256
821 * Public Key command, enable the corresponding event.
822 */
823 if (hdev->commands[34] & 0x02)
824 events[0] |= 0x80; /* LE Read Local P-256
825 * Public Key Complete
826 */
827
828 /* If the controller supports the LE Generate DHKey
829 * command, enable the corresponding event.
830 */
831 if (hdev->commands[34] & 0x04)
832 events[1] |= 0x01; /* LE Generate DHKey Complete */
833
834 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
835 events);
836
837 if (hdev->commands[25] & 0x40) {
838 /* Read LE Advertising Channel TX Power */
839 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
840 }
841
842 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
843 /* Read LE Maximum Data Length */
844 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
845
846 /* Read LE Suggested Default Data Length */
847 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
848 }
849
850 hci_set_le_support(req);
851 }
852
853 /* Read features beyond page 1 if available */
854 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
855 struct hci_cp_read_local_ext_features cp;
856
857 cp.page = p;
858 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
859 sizeof(cp), &cp);
860 }
861 }
862
863 static void hci_init4_req(struct hci_request *req, unsigned long opt)
864 {
865 struct hci_dev *hdev = req->hdev;
866
867 /* Some Broadcom based Bluetooth controllers do not support the
868 * Delete Stored Link Key command. They are clearly indicating its
869 * absence in the bit mask of supported commands.
870 *
871 	 * Check the supported commands and send it only if the command is
872 	 * marked as supported. If not supported, assume that the controller
873 * does not have actual support for stored link keys which makes this
874 * command redundant anyway.
875 *
876 * Some controllers indicate that they support handling deleting
877 * stored link keys, but they don't. The quirk lets a driver
878 * just disable this command.
879 */
880 if (hdev->commands[6] & 0x80 &&
881 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
882 struct hci_cp_delete_stored_link_key cp;
883
884 bacpy(&cp.bdaddr, BDADDR_ANY);
885 cp.delete_all = 0x01;
886 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
887 sizeof(cp), &cp);
888 }
889
890 /* Set event mask page 2 if the HCI command for it is supported */
891 if (hdev->commands[22] & 0x04)
892 hci_set_event_mask_page_2(req);
893
894 /* Read local codec list if the HCI command is supported */
895 if (hdev->commands[29] & 0x20)
896 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
897
898 /* Get MWS transport configuration if the HCI command is supported */
899 if (hdev->commands[30] & 0x08)
900 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
901
902 /* Check for Synchronization Train support */
903 if (lmp_sync_train_capable(hdev))
904 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
905
906 /* Enable Secure Connections if supported and configured */
907 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
908 bredr_sc_enabled(hdev)) {
909 u8 support = 0x01;
910
911 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
912 sizeof(support), &support);
913 }
914 }
915
916 static int __hci_init(struct hci_dev *hdev)
917 {
918 int err;
919
920 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
921 if (err < 0)
922 return err;
923
924 if (hci_dev_test_flag(hdev, HCI_SETUP))
925 hci_debugfs_create_basic(hdev);
926
927 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
928 if (err < 0)
929 return err;
930
931 	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
932 * BR/EDR/LE type controllers. AMP controllers only need the
933 * first two stages of init.
934 */
935 if (hdev->dev_type != HCI_BREDR)
936 return 0;
937
938 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
939 if (err < 0)
940 return err;
941
942 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
943 if (err < 0)
944 return err;
945
946 /* This function is only called when the controller is actually in
947 * configured state. When the controller is marked as unconfigured,
948 * this initialization procedure is not run.
949 *
950 * It means that it is possible that a controller runs through its
951 * setup phase and then discovers missing settings. If that is the
952 * case, then this function will not be called. It then will only
953 * be called during the config phase.
954 *
955 * So only when in setup phase or config phase, create the debugfs
956 * entries and register the SMP channels.
957 */
958 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
959 !hci_dev_test_flag(hdev, HCI_CONFIG))
960 return 0;
961
962 hci_debugfs_create_common(hdev);
963
964 if (lmp_bredr_capable(hdev))
965 hci_debugfs_create_bredr(hdev);
966
967 if (lmp_le_capable(hdev))
968 hci_debugfs_create_le(hdev);
969
970 return 0;
971 }
972
973 static void hci_init0_req(struct hci_request *req, unsigned long opt)
974 {
975 struct hci_dev *hdev = req->hdev;
976
977 BT_DBG("%s %ld", hdev->name, opt);
978
979 /* Reset */
980 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
981 hci_reset_req(req, 0);
982
983 /* Read Local Version */
984 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
985
986 /* Read BD Address */
987 if (hdev->set_bdaddr)
988 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
989 }
990
991 static int __hci_unconf_init(struct hci_dev *hdev)
992 {
993 int err;
994
995 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
996 return 0;
997
998 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
999 if (err < 0)
1000 return err;
1001
1002 if (hci_dev_test_flag(hdev, HCI_SETUP))
1003 hci_debugfs_create_basic(hdev);
1004
1005 return 0;
1006 }
1007
1008 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1009 {
1010 __u8 scan = opt;
1011
1012 BT_DBG("%s %x", req->hdev->name, scan);
1013
1014 /* Inquiry and Page scans */
1015 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1016 }
1017
1018 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1019 {
1020 __u8 auth = opt;
1021
1022 BT_DBG("%s %x", req->hdev->name, auth);
1023
1024 /* Authentication */
1025 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1026 }
1027
1028 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1029 {
1030 __u8 encrypt = opt;
1031
1032 BT_DBG("%s %x", req->hdev->name, encrypt);
1033
1034 /* Encryption */
1035 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1036 }
1037
1038 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1039 {
1040 __le16 policy = cpu_to_le16(opt);
1041
1042 BT_DBG("%s %x", req->hdev->name, policy);
1043
1044 /* Default link policy */
1045 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1046 }
1047
1048 /* Get HCI device by index.
1049 * Device is held on return. */
1050 struct hci_dev *hci_dev_get(int index)
1051 {
1052 struct hci_dev *hdev = NULL, *d;
1053
1054 BT_DBG("%d", index);
1055
1056 if (index < 0)
1057 return NULL;
1058
1059 read_lock(&hci_dev_list_lock);
1060 list_for_each_entry(d, &hci_dev_list, list) {
1061 if (d->id == index) {
1062 hdev = hci_dev_hold(d);
1063 break;
1064 }
1065 }
1066 read_unlock(&hci_dev_list_lock);
1067 return hdev;
1068 }
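
/* Example (illustrative, not part of the original source): hci_dev_get()
 * returns the device with a reference held, so every successful lookup
 * must be balanced with hci_dev_put(), as the ioctl helpers below do:
 *
 *    struct hci_dev *hdev = hci_dev_get(0);
 *
 *    if (!hdev)
 *        return -ENODEV;
 *    ...
 *    hci_dev_put(hdev);
 */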
1069
1070 /* ---- Inquiry support ---- */
1071
1072 bool hci_discovery_active(struct hci_dev *hdev)
1073 {
1074 struct discovery_state *discov = &hdev->discovery;
1075
1076 switch (discov->state) {
1077 case DISCOVERY_FINDING:
1078 case DISCOVERY_RESOLVING:
1079 return true;
1080
1081 default:
1082 return false;
1083 }
1084 }
1085
1086 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087 {
1088 int old_state = hdev->discovery.state;
1089
1090 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091
1092 if (old_state == state)
1093 return;
1094
1095 hdev->discovery.state = state;
1096
1097 switch (state) {
1098 case DISCOVERY_STOPPED:
1099 hci_update_background_scan(hdev);
1100
1101 if (old_state != DISCOVERY_STARTING)
1102 mgmt_discovering(hdev, 0);
1103 break;
1104 case DISCOVERY_STARTING:
1105 break;
1106 case DISCOVERY_FINDING:
1107 mgmt_discovering(hdev, 1);
1108 break;
1109 case DISCOVERY_RESOLVING:
1110 break;
1111 case DISCOVERY_STOPPING:
1112 break;
1113 }
1114 }
1115
1116 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1117 {
1118 struct discovery_state *cache = &hdev->discovery;
1119 struct inquiry_entry *p, *n;
1120
1121 list_for_each_entry_safe(p, n, &cache->all, all) {
1122 list_del(&p->all);
1123 kfree(p);
1124 }
1125
1126 INIT_LIST_HEAD(&cache->unknown);
1127 INIT_LIST_HEAD(&cache->resolve);
1128 }
1129
1130 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1131 bdaddr_t *bdaddr)
1132 {
1133 struct discovery_state *cache = &hdev->discovery;
1134 struct inquiry_entry *e;
1135
1136 BT_DBG("cache %p, %pMR", cache, bdaddr);
1137
1138 list_for_each_entry(e, &cache->all, all) {
1139 if (!bacmp(&e->data.bdaddr, bdaddr))
1140 return e;
1141 }
1142
1143 return NULL;
1144 }
1145
1146 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1147 bdaddr_t *bdaddr)
1148 {
1149 struct discovery_state *cache = &hdev->discovery;
1150 struct inquiry_entry *e;
1151
1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
1153
1154 list_for_each_entry(e, &cache->unknown, list) {
1155 if (!bacmp(&e->data.bdaddr, bdaddr))
1156 return e;
1157 }
1158
1159 return NULL;
1160 }
1161
1162 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1163 bdaddr_t *bdaddr,
1164 int state)
1165 {
1166 struct discovery_state *cache = &hdev->discovery;
1167 struct inquiry_entry *e;
1168
1169 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1170
1171 list_for_each_entry(e, &cache->resolve, list) {
1172 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1173 return e;
1174 if (!bacmp(&e->data.bdaddr, bdaddr))
1175 return e;
1176 }
1177
1178 return NULL;
1179 }
1180
1181 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1182 struct inquiry_entry *ie)
1183 {
1184 struct discovery_state *cache = &hdev->discovery;
1185 struct list_head *pos = &cache->resolve;
1186 struct inquiry_entry *p;
1187
1188 list_del(&ie->list);
1189
1190 list_for_each_entry(p, &cache->resolve, list) {
1191 if (p->name_state != NAME_PENDING &&
1192 abs(p->data.rssi) >= abs(ie->data.rssi))
1193 break;
1194 pos = &p->list;
1195 }
1196
1197 list_add(&ie->list, pos);
1198 }
1199
1200 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1201 bool name_known)
1202 {
1203 struct discovery_state *cache = &hdev->discovery;
1204 struct inquiry_entry *ie;
1205 u32 flags = 0;
1206
1207 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1208
1209 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1210
1211 if (!data->ssp_mode)
1212 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1213
1214 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1215 if (ie) {
1216 if (!ie->data.ssp_mode)
1217 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1218
1219 if (ie->name_state == NAME_NEEDED &&
1220 data->rssi != ie->data.rssi) {
1221 ie->data.rssi = data->rssi;
1222 hci_inquiry_cache_update_resolve(hdev, ie);
1223 }
1224
1225 goto update;
1226 }
1227
1228 /* Entry not in the cache. Add new one. */
1229 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1230 if (!ie) {
1231 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1232 goto done;
1233 }
1234
1235 list_add(&ie->all, &cache->all);
1236
1237 if (name_known) {
1238 ie->name_state = NAME_KNOWN;
1239 } else {
1240 ie->name_state = NAME_NOT_KNOWN;
1241 list_add(&ie->list, &cache->unknown);
1242 }
1243
1244 update:
1245 if (name_known && ie->name_state != NAME_KNOWN &&
1246 ie->name_state != NAME_PENDING) {
1247 ie->name_state = NAME_KNOWN;
1248 list_del(&ie->list);
1249 }
1250
1251 memcpy(&ie->data, data, sizeof(*data));
1252 ie->timestamp = jiffies;
1253 cache->timestamp = jiffies;
1254
1255 if (ie->name_state == NAME_NOT_KNOWN)
1256 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1257
1258 done:
1259 return flags;
1260 }
1261
1262 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263 {
1264 struct discovery_state *cache = &hdev->discovery;
1265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1267 int copied = 0;
1268
1269 list_for_each_entry(e, &cache->all, all) {
1270 struct inquiry_data *data = &e->data;
1271
1272 if (copied >= num)
1273 break;
1274
1275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
1281
1282 info++;
1283 copied++;
1284 }
1285
1286 BT_DBG("cache %p, copied %d", cache, copied);
1287 return copied;
1288 }
1289
1290 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1291 {
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1293 struct hci_dev *hdev = req->hdev;
1294 struct hci_cp_inquiry cp;
1295
1296 BT_DBG("%s", hdev->name);
1297
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
1299 return;
1300
1301 /* Start Inquiry */
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
1305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1306 }
1307
1308 int hci_inquiry(void __user *arg)
1309 {
1310 __u8 __user *ptr = arg;
1311 struct hci_inquiry_req ir;
1312 struct hci_dev *hdev;
1313 int err = 0, do_inquiry = 0, max_rsp;
1314 long timeo;
1315 __u8 *buf;
1316
1317 if (copy_from_user(&ir, ptr, sizeof(ir)))
1318 return -EFAULT;
1319
1320 hdev = hci_dev_get(ir.dev_id);
1321 if (!hdev)
1322 return -ENODEV;
1323
1324 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1325 err = -EBUSY;
1326 goto done;
1327 }
1328
1329 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1330 err = -EOPNOTSUPP;
1331 goto done;
1332 }
1333
1334 if (hdev->dev_type != HCI_BREDR) {
1335 err = -EOPNOTSUPP;
1336 goto done;
1337 }
1338
1339 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1340 err = -EOPNOTSUPP;
1341 goto done;
1342 }
1343
1344 hci_dev_lock(hdev);
1345 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1346 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1347 hci_inquiry_cache_flush(hdev);
1348 do_inquiry = 1;
1349 }
1350 hci_dev_unlock(hdev);
1351
1352 timeo = ir.length * msecs_to_jiffies(2000);
1353
1354 if (do_inquiry) {
1355 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1356 timeo);
1357 if (err < 0)
1358 goto done;
1359
1360 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1361 * cleared). If it is interrupted by a signal, return -EINTR.
1362 */
1363 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1364 TASK_INTERRUPTIBLE))
1365 return -EINTR;
1366 }
1367
1368 	/* For an unlimited number of responses we will use a buffer with
1369 * 255 entries
1370 */
1371 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1372
1373 	/* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1374 	 * then copy it to user space.
1375 */
1376 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1377 if (!buf) {
1378 err = -ENOMEM;
1379 goto done;
1380 }
1381
1382 hci_dev_lock(hdev);
1383 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1384 hci_dev_unlock(hdev);
1385
1386 BT_DBG("num_rsp %d", ir.num_rsp);
1387
1388 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1389 ptr += sizeof(ir);
1390 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1391 ir.num_rsp))
1392 err = -EFAULT;
1393 } else
1394 err = -EFAULT;
1395
1396 kfree(buf);
1397
1398 done:
1399 hci_dev_put(hdev);
1400 return err;
1401 }
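
/* Example (illustrative, not part of the original source): hci_inquiry() is
 * reached through the HCIINQUIRY ioctl on an HCI socket. A userspace caller
 * (e.g. the BlueZ library) typically packs the request header and the
 * result buffer together, roughly as sketched below, with error handling
 * omitted. The length field is in 1.28 second units and the LAP is the
 * General Inquiry Access Code 0x9e8b33 stored little endian:
 *
 *    struct hci_inquiry_req *ir;
 *    char buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
 *
 *    ir = (void *) buf;
 *    ir->dev_id  = 0;
 *    ir->num_rsp = 255;
 *    ir->length  = 8;
 *    ir->flags   = IREQ_CACHE_FLUSH;
 *    ir->lap[0]  = 0x33;
 *    ir->lap[1]  = 0x8b;
 *    ir->lap[2]  = 0x9e;
 *    ioctl(sock, HCIINQUIRY, buf);
 *
 * On return, ir->num_rsp holds the number of inquiry_info entries copied
 * right after the request header.
 */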
1402
1403 static int hci_dev_do_open(struct hci_dev *hdev)
1404 {
1405 int ret = 0;
1406
1407 BT_DBG("%s %p", hdev->name, hdev);
1408
1409 hci_req_lock(hdev);
1410
1411 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1412 ret = -ENODEV;
1413 goto done;
1414 }
1415
1416 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1417 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1418 /* Check for rfkill but allow the HCI setup stage to
1419 * proceed (which in itself doesn't cause any RF activity).
1420 */
1421 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1422 ret = -ERFKILL;
1423 goto done;
1424 }
1425
1426 /* Check for valid public address or a configured static
1427 		 * random address, but let the HCI setup proceed to
1428 * be able to determine if there is a public address
1429 * or not.
1430 *
1431 * In case of user channel usage, it is not important
1432 * if a public address or static random address is
1433 * available.
1434 *
1435 * This check is only valid for BR/EDR controllers
1436 * since AMP controllers do not have an address.
1437 */
1438 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1439 hdev->dev_type == HCI_BREDR &&
1440 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1441 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1442 ret = -EADDRNOTAVAIL;
1443 goto done;
1444 }
1445 }
1446
1447 if (test_bit(HCI_UP, &hdev->flags)) {
1448 ret = -EALREADY;
1449 goto done;
1450 }
1451
1452 if (hdev->open(hdev)) {
1453 ret = -EIO;
1454 goto done;
1455 }
1456
1457 set_bit(HCI_RUNNING, &hdev->flags);
1458 hci_notify(hdev, HCI_DEV_OPEN);
1459
1460 atomic_set(&hdev->cmd_cnt, 1);
1461 set_bit(HCI_INIT, &hdev->flags);
1462
1463 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1464 if (hdev->setup)
1465 ret = hdev->setup(hdev);
1466
1467 /* The transport driver can set these quirks before
1468 * creating the HCI device or in its setup callback.
1469 *
1470 * In case any of them is set, the controller has to
1471 * start up as unconfigured.
1472 */
1473 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1474 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1475 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1476
1477 /* For an unconfigured controller it is required to
1478 * read at least the version information provided by
1479 * the Read Local Version Information command.
1480 *
1481 * If the set_bdaddr driver callback is provided, then
1482 * also the original Bluetooth public device address
1483 * will be read using the Read BD Address command.
1484 */
1485 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1486 ret = __hci_unconf_init(hdev);
1487 }
1488
1489 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1490 /* If public address change is configured, ensure that
1491 * the address gets programmed. If the driver does not
1492 * support changing the public address, fail the power
1493 * on procedure.
1494 */
1495 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1496 hdev->set_bdaddr)
1497 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1498 else
1499 ret = -EADDRNOTAVAIL;
1500 }
1501
1502 if (!ret) {
1503 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1504 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1505 ret = __hci_init(hdev);
1506 }
1507
1508 /* If the HCI Reset command is clearing all diagnostic settings,
1509 * then they need to be reprogrammed after the init procedure
1510 	 * has completed.
1511 */
1512 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1513 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1514 ret = hdev->set_diag(hdev, true);
1515
1516 clear_bit(HCI_INIT, &hdev->flags);
1517
1518 if (!ret) {
1519 hci_dev_hold(hdev);
1520 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1521 set_bit(HCI_UP, &hdev->flags);
1522 hci_notify(hdev, HCI_DEV_UP);
1523 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1524 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1525 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1526 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1527 hdev->dev_type == HCI_BREDR) {
1528 hci_dev_lock(hdev);
1529 mgmt_powered(hdev, 1);
1530 hci_dev_unlock(hdev);
1531 }
1532 } else {
1533 /* Init failed, cleanup */
1534 flush_work(&hdev->tx_work);
1535 flush_work(&hdev->cmd_work);
1536 flush_work(&hdev->rx_work);
1537
1538 skb_queue_purge(&hdev->cmd_q);
1539 skb_queue_purge(&hdev->rx_q);
1540
1541 if (hdev->flush)
1542 hdev->flush(hdev);
1543
1544 if (hdev->sent_cmd) {
1545 kfree_skb(hdev->sent_cmd);
1546 hdev->sent_cmd = NULL;
1547 }
1548
1549 clear_bit(HCI_RUNNING, &hdev->flags);
1550 hci_notify(hdev, HCI_DEV_CLOSE);
1551
1552 hdev->close(hdev);
1553 hdev->flags &= BIT(HCI_RAW);
1554 }
1555
1556 done:
1557 hci_req_unlock(hdev);
1558 return ret;
1559 }
1560
1561 /* ---- HCI ioctl helpers ---- */
1562
1563 int hci_dev_open(__u16 dev)
1564 {
1565 struct hci_dev *hdev;
1566 int err;
1567
1568 hdev = hci_dev_get(dev);
1569 if (!hdev)
1570 return -ENODEV;
1571
1572 /* Devices that are marked as unconfigured can only be powered
1573 * up as user channel. Trying to bring them up as normal devices
1574 	 * will result in a failure. Only user channel operation is
1575 * possible.
1576 *
1577 * When this function is called for a user channel, the flag
1578 * HCI_USER_CHANNEL will be set first before attempting to
1579 * open the device.
1580 */
1581 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1582 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1583 err = -EOPNOTSUPP;
1584 goto done;
1585 }
1586
1587 /* We need to ensure that no other power on/off work is pending
1588 * before proceeding to call hci_dev_do_open. This is
1589 * particularly important if the setup procedure has not yet
1590 * completed.
1591 */
1592 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1593 cancel_delayed_work(&hdev->power_off);
1594
1595 /* After this call it is guaranteed that the setup procedure
1596 * has finished. This means that error conditions like RFKILL
1597 * or no valid public or static random address apply.
1598 */
1599 flush_workqueue(hdev->req_workqueue);
1600
1601 /* For controllers not using the management interface and that
1602 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1603 * so that pairing works for them. Once the management interface
1604 * is in use this bit will be cleared again and userspace has
1605 * to explicitly enable it.
1606 */
1607 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1608 !hci_dev_test_flag(hdev, HCI_MGMT))
1609 hci_dev_set_flag(hdev, HCI_BONDABLE);
1610
1611 err = hci_dev_do_open(hdev);
1612
1613 done:
1614 hci_dev_put(hdev);
1615 return err;
1616 }
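
/* Example (illustrative, not part of the original source): hci_dev_open()
 * backs the HCIDEVUP ioctl. From userspace, bringing up controller index 0
 * amounts to something like:
 *
 *    int sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *    if (sock >= 0)
 *        ioctl(sock, HCIDEVUP, 0);
 *
 * hci_dev_close() further below similarly backs HCIDEVDOWN.
 */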
1617
1618 /* This function requires the caller holds hdev->lock */
1619 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1620 {
1621 struct hci_conn_params *p;
1622
1623 list_for_each_entry(p, &hdev->le_conn_params, list) {
1624 if (p->conn) {
1625 hci_conn_drop(p->conn);
1626 hci_conn_put(p->conn);
1627 p->conn = NULL;
1628 }
1629 list_del_init(&p->action);
1630 }
1631
1632 BT_DBG("All LE pending actions cleared");
1633 }
1634
1635 int hci_dev_do_close(struct hci_dev *hdev)
1636 {
1637 bool auto_off;
1638
1639 BT_DBG("%s %p", hdev->name, hdev);
1640
1641 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1642 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1643 test_bit(HCI_UP, &hdev->flags)) {
1644 /* Execute vendor specific shutdown routine */
1645 if (hdev->shutdown)
1646 hdev->shutdown(hdev);
1647 }
1648
1649 cancel_delayed_work(&hdev->power_off);
1650
1651 hci_req_cancel(hdev, ENODEV);
1652 hci_req_lock(hdev);
1653
1654 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1655 cancel_delayed_work_sync(&hdev->cmd_timer);
1656 hci_req_unlock(hdev);
1657 return 0;
1658 }
1659
1660 /* Flush RX and TX works */
1661 flush_work(&hdev->tx_work);
1662 flush_work(&hdev->rx_work);
1663
1664 if (hdev->discov_timeout > 0) {
1665 cancel_delayed_work(&hdev->discov_off);
1666 hdev->discov_timeout = 0;
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1668 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1669 }
1670
1671 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1672 cancel_delayed_work(&hdev->service_cache);
1673
1674 cancel_delayed_work_sync(&hdev->le_scan_disable);
1675 cancel_delayed_work_sync(&hdev->le_scan_restart);
1676
1677 if (hci_dev_test_flag(hdev, HCI_MGMT))
1678 cancel_delayed_work_sync(&hdev->rpa_expired);
1679
1680 if (hdev->adv_instance_timeout) {
1681 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1682 hdev->adv_instance_timeout = 0;
1683 }
1684
1685 /* Avoid potential lockdep warnings from the *_flush() calls by
1686 * ensuring the workqueue is empty up front.
1687 */
1688 drain_workqueue(hdev->workqueue);
1689
1690 hci_dev_lock(hdev);
1691
1692 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1693
1694 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1695
1696 if (!auto_off && hdev->dev_type == HCI_BREDR)
1697 mgmt_powered(hdev, 0);
1698
1699 hci_inquiry_cache_flush(hdev);
1700 hci_pend_le_actions_clear(hdev);
1701 hci_conn_hash_flush(hdev);
1702 hci_dev_unlock(hdev);
1703
1704 smp_unregister(hdev);
1705
1706 hci_notify(hdev, HCI_DEV_DOWN);
1707
1708 if (hdev->flush)
1709 hdev->flush(hdev);
1710
1711 /* Reset device */
1712 skb_queue_purge(&hdev->cmd_q);
1713 atomic_set(&hdev->cmd_cnt, 1);
1714 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1715 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1716 set_bit(HCI_INIT, &hdev->flags);
1717 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1718 clear_bit(HCI_INIT, &hdev->flags);
1719 }
1720
1721 /* flush cmd work */
1722 flush_work(&hdev->cmd_work);
1723
1724 /* Drop queues */
1725 skb_queue_purge(&hdev->rx_q);
1726 skb_queue_purge(&hdev->cmd_q);
1727 skb_queue_purge(&hdev->raw_q);
1728
1729 /* Drop last sent command */
1730 if (hdev->sent_cmd) {
1731 cancel_delayed_work_sync(&hdev->cmd_timer);
1732 kfree_skb(hdev->sent_cmd);
1733 hdev->sent_cmd = NULL;
1734 }
1735
1736 clear_bit(HCI_RUNNING, &hdev->flags);
1737 hci_notify(hdev, HCI_DEV_CLOSE);
1738
1739 /* After this point our queues are empty
1740 * and no tasks are scheduled. */
1741 hdev->close(hdev);
1742
1743 /* Clear flags */
1744 hdev->flags &= BIT(HCI_RAW);
1745 hci_dev_clear_volatile_flags(hdev);
1746
1747 /* Controller radio is available but is currently powered down */
1748 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1749
1750 memset(hdev->eir, 0, sizeof(hdev->eir));
1751 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1752 bacpy(&hdev->random_addr, BDADDR_ANY);
1753
1754 hci_req_unlock(hdev);
1755
1756 hci_dev_put(hdev);
1757 return 0;
1758 }
1759
1760 int hci_dev_close(__u16 dev)
1761 {
1762 struct hci_dev *hdev;
1763 int err;
1764
1765 hdev = hci_dev_get(dev);
1766 if (!hdev)
1767 return -ENODEV;
1768
1769 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1770 err = -EBUSY;
1771 goto done;
1772 }
1773
1774 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1775 cancel_delayed_work(&hdev->power_off);
1776
1777 err = hci_dev_do_close(hdev);
1778
1779 done:
1780 hci_dev_put(hdev);
1781 return err;
1782 }
1783
1784 static int hci_dev_do_reset(struct hci_dev *hdev)
1785 {
1786 int ret;
1787
1788 BT_DBG("%s %p", hdev->name, hdev);
1789
1790 hci_req_lock(hdev);
1791
1792 /* Drop queues */
1793 skb_queue_purge(&hdev->rx_q);
1794 skb_queue_purge(&hdev->cmd_q);
1795
1796 /* Avoid potential lockdep warnings from the *_flush() calls by
1797 * ensuring the workqueue is empty up front.
1798 */
1799 drain_workqueue(hdev->workqueue);
1800
1801 hci_dev_lock(hdev);
1802 hci_inquiry_cache_flush(hdev);
1803 hci_conn_hash_flush(hdev);
1804 hci_dev_unlock(hdev);
1805
1806 if (hdev->flush)
1807 hdev->flush(hdev);
1808
1809 atomic_set(&hdev->cmd_cnt, 1);
1810 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1811
1812 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1813
1814 hci_req_unlock(hdev);
1815 return ret;
1816 }
1817
1818 int hci_dev_reset(__u16 dev)
1819 {
1820 struct hci_dev *hdev;
1821 int err;
1822
1823 hdev = hci_dev_get(dev);
1824 if (!hdev)
1825 return -ENODEV;
1826
1827 if (!test_bit(HCI_UP, &hdev->flags)) {
1828 err = -ENETDOWN;
1829 goto done;
1830 }
1831
1832 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1833 err = -EBUSY;
1834 goto done;
1835 }
1836
1837 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1838 err = -EOPNOTSUPP;
1839 goto done;
1840 }
1841
1842 err = hci_dev_do_reset(hdev);
1843
1844 done:
1845 hci_dev_put(hdev);
1846 return err;
1847 }
1848
1849 int hci_dev_reset_stat(__u16 dev)
1850 {
1851 struct hci_dev *hdev;
1852 int ret = 0;
1853
1854 hdev = hci_dev_get(dev);
1855 if (!hdev)
1856 return -ENODEV;
1857
1858 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1859 ret = -EBUSY;
1860 goto done;
1861 }
1862
1863 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1864 ret = -EOPNOTSUPP;
1865 goto done;
1866 }
1867
1868 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1869
1870 done:
1871 hci_dev_put(hdev);
1872 return ret;
1873 }
1874
1875 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1876 {
1877 bool conn_changed, discov_changed;
1878
1879 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1880
1881 if ((scan & SCAN_PAGE))
1882 conn_changed = !hci_dev_test_and_set_flag(hdev,
1883 HCI_CONNECTABLE);
1884 else
1885 conn_changed = hci_dev_test_and_clear_flag(hdev,
1886 HCI_CONNECTABLE);
1887
1888 if ((scan & SCAN_INQUIRY)) {
1889 discov_changed = !hci_dev_test_and_set_flag(hdev,
1890 HCI_DISCOVERABLE);
1891 } else {
1892 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1893 discov_changed = hci_dev_test_and_clear_flag(hdev,
1894 HCI_DISCOVERABLE);
1895 }
1896
1897 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1898 return;
1899
1900 if (conn_changed || discov_changed) {
1901 /* In case this was disabled through mgmt */
1902 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1903
1904 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1905 mgmt_update_adv_data(hdev);
1906
1907 mgmt_new_settings(hdev);
1908 }
1909 }
1910
1911 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1912 {
1913 struct hci_dev *hdev;
1914 struct hci_dev_req dr;
1915 int err = 0;
1916
1917 if (copy_from_user(&dr, arg, sizeof(dr)))
1918 return -EFAULT;
1919
1920 hdev = hci_dev_get(dr.dev_id);
1921 if (!hdev)
1922 return -ENODEV;
1923
1924 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1925 err = -EBUSY;
1926 goto done;
1927 }
1928
1929 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1930 err = -EOPNOTSUPP;
1931 goto done;
1932 }
1933
1934 if (hdev->dev_type != HCI_BREDR) {
1935 err = -EOPNOTSUPP;
1936 goto done;
1937 }
1938
1939 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1940 err = -EOPNOTSUPP;
1941 goto done;
1942 }
1943
1944 switch (cmd) {
1945 case HCISETAUTH:
1946 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1947 HCI_INIT_TIMEOUT);
1948 break;
1949
1950 case HCISETENCRYPT:
1951 if (!lmp_encrypt_capable(hdev)) {
1952 err = -EOPNOTSUPP;
1953 break;
1954 }
1955
1956 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1957 /* Auth must be enabled first */
1958 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1959 HCI_INIT_TIMEOUT);
1960 if (err)
1961 break;
1962 }
1963
1964 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1965 HCI_INIT_TIMEOUT);
1966 break;
1967
1968 case HCISETSCAN:
1969 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1970 HCI_INIT_TIMEOUT);
1971
1972 /* Ensure that the connectable and discoverable states
1973 * get correctly modified as this was a non-mgmt change.
1974 */
1975 if (!err)
1976 hci_update_scan_state(hdev, dr.dev_opt);
1977 break;
1978
1979 case HCISETLINKPOL:
1980 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1981 HCI_INIT_TIMEOUT);
1982 break;
1983
1984 case HCISETLINKMODE:
1985 hdev->link_mode = ((__u16) dr.dev_opt) &
1986 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1987 break;
1988
1989 case HCISETPTYPE:
1990 hdev->pkt_type = (__u16) dr.dev_opt;
1991 break;
1992
1993 case HCISETACLMTU:
1994 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1995 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1996 break;
1997
1998 case HCISETSCOMTU:
1999 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2000 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2001 break;
2002
2003 default:
2004 err = -EINVAL;
2005 break;
2006 }
2007
2008 done:
2009 hci_dev_put(hdev);
2010 return err;
2011 }
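
/* Example (illustrative, not part of the original source): the legacy
 * ioctls handled above take a struct hci_dev_req. Enabling page and
 * inquiry scan on controller 0 from userspace would look roughly like:
 *
 *    struct hci_dev_req dr;
 *
 *    dr.dev_id  = 0;
 *    dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *    ioctl(sock, HCISETSCAN, &dr);
 *
 * which lands in the HCISETSCAN case above and, on success, updates the
 * connectable/discoverable flags via hci_update_scan_state().
 */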
2012
2013 int hci_get_dev_list(void __user *arg)
2014 {
2015 struct hci_dev *hdev;
2016 struct hci_dev_list_req *dl;
2017 struct hci_dev_req *dr;
2018 int n = 0, size, err;
2019 __u16 dev_num;
2020
2021 if (get_user(dev_num, (__u16 __user *) arg))
2022 return -EFAULT;
2023
2024 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2025 return -EINVAL;
2026
2027 size = sizeof(*dl) + dev_num * sizeof(*dr);
2028
2029 dl = kzalloc(size, GFP_KERNEL);
2030 if (!dl)
2031 return -ENOMEM;
2032
2033 dr = dl->dev_req;
2034
2035 read_lock(&hci_dev_list_lock);
2036 list_for_each_entry(hdev, &hci_dev_list, list) {
2037 unsigned long flags = hdev->flags;
2038
2039 /* When the auto-off is configured it means the transport
2040 * is running, but in that case still indicate that the
2041 * device is actually down.
2042 */
2043 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2044 flags &= ~BIT(HCI_UP);
2045
2046 (dr + n)->dev_id = hdev->id;
2047 (dr + n)->dev_opt = flags;
2048
2049 if (++n >= dev_num)
2050 break;
2051 }
2052 read_unlock(&hci_dev_list_lock);
2053
2054 dl->dev_num = n;
2055 size = sizeof(*dl) + n * sizeof(*dr);
2056
2057 err = copy_to_user(arg, dl, size);
2058 kfree(dl);
2059
2060 return err ? -EFAULT : 0;
2061 }
2062
2063 int hci_get_dev_info(void __user *arg)
2064 {
2065 struct hci_dev *hdev;
2066 struct hci_dev_info di;
2067 unsigned long flags;
2068 int err = 0;
2069
2070 if (copy_from_user(&di, arg, sizeof(di)))
2071 return -EFAULT;
2072
2073 hdev = hci_dev_get(di.dev_id);
2074 if (!hdev)
2075 return -ENODEV;
2076
2077	/* When auto-off is configured the transport is running,
2078	 * but in that case still indicate that the device is
2079	 * actually down.
2080	 */
2081 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2082 flags = hdev->flags & ~BIT(HCI_UP);
2083 else
2084 flags = hdev->flags;
2085
2086 strcpy(di.name, hdev->name);
2087 di.bdaddr = hdev->bdaddr;
2088 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2089 di.flags = flags;
2090 di.pkt_type = hdev->pkt_type;
2091 if (lmp_bredr_capable(hdev)) {
2092 di.acl_mtu = hdev->acl_mtu;
2093 di.acl_pkts = hdev->acl_pkts;
2094 di.sco_mtu = hdev->sco_mtu;
2095 di.sco_pkts = hdev->sco_pkts;
2096 } else {
2097 di.acl_mtu = hdev->le_mtu;
2098 di.acl_pkts = hdev->le_pkts;
2099 di.sco_mtu = 0;
2100 di.sco_pkts = 0;
2101 }
2102 di.link_policy = hdev->link_policy;
2103 di.link_mode = hdev->link_mode;
2104
2105 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2106 memcpy(&di.features, &hdev->features, sizeof(di.features));
2107
2108 if (copy_to_user(arg, &di, sizeof(di)))
2109 err = -EFAULT;
2110
2111 hci_dev_put(hdev);
2112
2113 return err;
2114 }
2115
2116 /* ---- Interface to HCI drivers ---- */
2117
2118 static int hci_rfkill_set_block(void *data, bool blocked)
2119 {
2120 struct hci_dev *hdev = data;
2121
2122 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2123
2124 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2125 return -EBUSY;
2126
2127 if (blocked) {
2128 hci_dev_set_flag(hdev, HCI_RFKILLED);
2129 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2130 !hci_dev_test_flag(hdev, HCI_CONFIG))
2131 hci_dev_do_close(hdev);
2132 } else {
2133 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2134 }
2135
2136 return 0;
2137 }
2138
2139 static const struct rfkill_ops hci_rfkill_ops = {
2140 .set_block = hci_rfkill_set_block,
2141 };
2142
2143 static void hci_power_on(struct work_struct *work)
2144 {
2145 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2146 int err;
2147
2148 BT_DBG("%s", hdev->name);
2149
2150 err = hci_dev_do_open(hdev);
2151 if (err < 0) {
2152 hci_dev_lock(hdev);
2153 mgmt_set_powered_failed(hdev, err);
2154 hci_dev_unlock(hdev);
2155 return;
2156 }
2157
2158 /* During the HCI setup phase, a few error conditions are
2159 * ignored and they need to be checked now. If they are still
2160 * valid, it is important to turn the device back off.
2161 */
2162 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2163 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2164 (hdev->dev_type == HCI_BREDR &&
2165 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2166 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2167 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2168 hci_dev_do_close(hdev);
2169 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2170 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2171 HCI_AUTO_OFF_TIMEOUT);
2172 }
2173
2174 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2175 /* For unconfigured devices, set the HCI_RAW flag
2176 * so that userspace can easily identify them.
2177 */
2178 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2179 set_bit(HCI_RAW, &hdev->flags);
2180
2181 /* For fully configured devices, this will send
2182 * the Index Added event. For unconfigured devices,
2183		 * it will send an Unconfigured Index Added event.
2184		 *
2185		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2186		 * and no event will be sent.
2187 */
2188 mgmt_index_added(hdev);
2189 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2190		/* Now that the controller is configured, it is
2191		 * important to clear the HCI_RAW flag.
2192 */
2193 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2194 clear_bit(HCI_RAW, &hdev->flags);
2195
2196 /* Powering on the controller with HCI_CONFIG set only
2197 * happens with the transition from unconfigured to
2198 * configured. This will send the Index Added event.
2199 */
2200 mgmt_index_added(hdev);
2201 }
2202 }
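/* Note (added, illustrative): hci_power_on runs on hdev->req_workqueue.
 * It is queued from hci_register_dev() further below and, assuming the
 * usual mgmt Set Powered path, from userspace power-on requests:
 *
 *	queue_work(hdev->req_workqueue, &hdev->power_on);
 */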
2203
2204 static void hci_power_off(struct work_struct *work)
2205 {
2206 struct hci_dev *hdev = container_of(work, struct hci_dev,
2207 power_off.work);
2208
2209 BT_DBG("%s", hdev->name);
2210
2211 hci_dev_do_close(hdev);
2212 }
2213
2214 static void hci_error_reset(struct work_struct *work)
2215 {
2216 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2217
2218 BT_DBG("%s", hdev->name);
2219
2220 if (hdev->hw_error)
2221 hdev->hw_error(hdev, hdev->hw_error_code);
2222 else
2223 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2224 hdev->hw_error_code);
2225
2226 if (hci_dev_do_close(hdev))
2227 return;
2228
2229 hci_dev_do_open(hdev);
2230 }
2231
2232 static void hci_discov_off(struct work_struct *work)
2233 {
2234 struct hci_dev *hdev;
2235
2236 hdev = container_of(work, struct hci_dev, discov_off.work);
2237
2238 BT_DBG("%s", hdev->name);
2239
2240 mgmt_discoverable_timeout(hdev);
2241 }
2242
2243 static void hci_adv_timeout_expire(struct work_struct *work)
2244 {
2245 struct hci_dev *hdev;
2246
2247 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2248
2249 BT_DBG("%s", hdev->name);
2250
2251 mgmt_adv_timeout_expired(hdev);
2252 }
2253
2254 void hci_uuids_clear(struct hci_dev *hdev)
2255 {
2256 struct bt_uuid *uuid, *tmp;
2257
2258 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2259 list_del(&uuid->list);
2260 kfree(uuid);
2261 }
2262 }
2263
2264 void hci_link_keys_clear(struct hci_dev *hdev)
2265 {
2266 struct link_key *key;
2267
2268 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2269 list_del_rcu(&key->list);
2270 kfree_rcu(key, rcu);
2271 }
2272 }
2273
2274 void hci_smp_ltks_clear(struct hci_dev *hdev)
2275 {
2276 struct smp_ltk *k;
2277
2278 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2279 list_del_rcu(&k->list);
2280 kfree_rcu(k, rcu);
2281 }
2282 }
2283
2284 void hci_smp_irks_clear(struct hci_dev *hdev)
2285 {
2286 struct smp_irk *k;
2287
2288 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2289 list_del_rcu(&k->list);
2290 kfree_rcu(k, rcu);
2291 }
2292 }
2293
2294 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2295 {
2296 struct link_key *k;
2297
2298 rcu_read_lock();
2299 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2300 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2301 rcu_read_unlock();
2302 return k;
2303 }
2304 }
2305 rcu_read_unlock();
2306
2307 return NULL;
2308 }
2309
2310 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2311 u8 key_type, u8 old_key_type)
2312 {
2313 /* Legacy key */
2314 if (key_type < 0x03)
2315 return true;
2316
2317 /* Debug keys are insecure so don't store them persistently */
2318 if (key_type == HCI_LK_DEBUG_COMBINATION)
2319 return false;
2320
2321 /* Changed combination key and there's no previous one */
2322 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2323 return false;
2324
2325 /* Security mode 3 case */
2326 if (!conn)
2327 return true;
2328
2329 /* BR/EDR key derived using SC from an LE link */
2330 if (conn->type == LE_LINK)
2331 return true;
2332
2333 /* Neither local nor remote side had no-bonding as requirement */
2334 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2335 return true;
2336
2337 /* Local side had dedicated bonding as requirement */
2338 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2339 return true;
2340
2341 /* Remote side had dedicated bonding as requirement */
2342 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2343 return true;
2344
2345 /* If none of the above criteria match, then don't store the key
2346 * persistently */
2347 return false;
2348 }
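/* Worked example for the rules above (illustrative): a debug combination
 * key (HCI_LK_DEBUG_COMBINATION) is never stored, regardless of the
 * connection. An unauthenticated combination key created on a link where
 * both sides requested some form of bonding (conn->auth_type and
 * conn->remote_auth both >= 0x02) is stored persistently, since neither
 * side asked for no-bonding.
 */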
2349
2350 static u8 ltk_role(u8 type)
2351 {
2352 if (type == SMP_LTK)
2353 return HCI_ROLE_MASTER;
2354
2355 return HCI_ROLE_SLAVE;
2356 }
2357
2358 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359 u8 addr_type, u8 role)
2360 {
2361 struct smp_ltk *k;
2362
2363 rcu_read_lock();
2364 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2365 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2366 continue;
2367
2368 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2369 rcu_read_unlock();
2370 return k;
2371 }
2372 }
2373 rcu_read_unlock();
2374
2375 return NULL;
2376 }
2377
2378 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2379 {
2380 struct smp_irk *irk;
2381
2382 rcu_read_lock();
2383 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2384 if (!bacmp(&irk->rpa, rpa)) {
2385 rcu_read_unlock();
2386 return irk;
2387 }
2388 }
2389
2390 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2391 if (smp_irk_matches(hdev, irk->val, rpa)) {
2392 bacpy(&irk->rpa, rpa);
2393 rcu_read_unlock();
2394 return irk;
2395 }
2396 }
2397 rcu_read_unlock();
2398
2399 return NULL;
2400 }
2401
2402 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 u8 addr_type)
2404 {
2405 struct smp_irk *irk;
2406
2407 /* Identity Address must be public or static random */
2408 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2409 return NULL;
2410
2411 rcu_read_lock();
2412 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2413 if (addr_type == irk->addr_type &&
2414 bacmp(bdaddr, &irk->bdaddr) == 0) {
2415 rcu_read_unlock();
2416 return irk;
2417 }
2418 }
2419 rcu_read_unlock();
2420
2421 return NULL;
2422 }
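/* Background for the address check above (illustrative): bdaddr_t stores
 * the address little-endian, so b[5] holds the most significant byte, and
 * a static random address has its two top bits set to 1. An address such
 * as the following therefore passes the check:
 *
 *	bdaddr_t addr = {{ 0x01, 0x02, 0x03, 0x04, 0x05, 0xc5 }};
 */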
2423
2424 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2425 bdaddr_t *bdaddr, u8 *val, u8 type,
2426 u8 pin_len, bool *persistent)
2427 {
2428 struct link_key *key, *old_key;
2429 u8 old_key_type;
2430
2431 old_key = hci_find_link_key(hdev, bdaddr);
2432 if (old_key) {
2433 old_key_type = old_key->type;
2434 key = old_key;
2435 } else {
2436 old_key_type = conn ? conn->key_type : 0xff;
2437 key = kzalloc(sizeof(*key), GFP_KERNEL);
2438 if (!key)
2439 return NULL;
2440 list_add_rcu(&key->list, &hdev->link_keys);
2441 }
2442
2443 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2444
2445 /* Some buggy controller combinations generate a changed
2446 * combination key for legacy pairing even when there's no
2447 * previous key */
2448 if (type == HCI_LK_CHANGED_COMBINATION &&
2449 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2450 type = HCI_LK_COMBINATION;
2451 if (conn)
2452 conn->key_type = type;
2453 }
2454
2455 bacpy(&key->bdaddr, bdaddr);
2456 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2457 key->pin_len = pin_len;
2458
2459 if (type == HCI_LK_CHANGED_COMBINATION)
2460 key->type = old_key_type;
2461 else
2462 key->type = type;
2463
2464 if (persistent)
2465 *persistent = hci_persistent_key(hdev, conn, type,
2466 old_key_type);
2467
2468 return key;
2469 }
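/* Usage sketch (illustrative, loosely following the Link Key Notification
 * handling in hci_event.c; variables outside this file are assumptions):
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */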
2470
2471 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2472 u8 addr_type, u8 type, u8 authenticated,
2473 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2474 {
2475 struct smp_ltk *key, *old_key;
2476 u8 role = ltk_role(type);
2477
2478 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2479 if (old_key)
2480 key = old_key;
2481 else {
2482 key = kzalloc(sizeof(*key), GFP_KERNEL);
2483 if (!key)
2484 return NULL;
2485 list_add_rcu(&key->list, &hdev->long_term_keys);
2486 }
2487
2488 bacpy(&key->bdaddr, bdaddr);
2489 key->bdaddr_type = addr_type;
2490 memcpy(key->val, tk, sizeof(key->val));
2491 key->authenticated = authenticated;
2492 key->ediv = ediv;
2493 key->rand = rand;
2494 key->enc_size = enc_size;
2495 key->type = type;
2496
2497 return key;
2498 }
2499
2500 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2501 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2502 {
2503 struct smp_irk *irk;
2504
2505 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2506 if (!irk) {
2507 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2508 if (!irk)
2509 return NULL;
2510
2511 bacpy(&irk->bdaddr, bdaddr);
2512 irk->addr_type = addr_type;
2513
2514 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2515 }
2516
2517 memcpy(irk->val, val, 16);
2518 bacpy(&irk->rpa, rpa);
2519
2520 return irk;
2521 }
2522
2523 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2524 {
2525 struct link_key *key;
2526
2527 key = hci_find_link_key(hdev, bdaddr);
2528 if (!key)
2529 return -ENOENT;
2530
2531 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2532
2533 list_del_rcu(&key->list);
2534 kfree_rcu(key, rcu);
2535
2536 return 0;
2537 }
2538
2539 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2540 {
2541 struct smp_ltk *k;
2542 int removed = 0;
2543
2544 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2545 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2546 continue;
2547
2548 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2549
2550 list_del_rcu(&k->list);
2551 kfree_rcu(k, rcu);
2552 removed++;
2553 }
2554
2555 return removed ? 0 : -ENOENT;
2556 }
2557
2558 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2559 {
2560 struct smp_irk *k;
2561
2562 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2563 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2564 continue;
2565
2566 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2567
2568 list_del_rcu(&k->list);
2569 kfree_rcu(k, rcu);
2570 }
2571 }
2572
2573 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2574 {
2575 struct smp_ltk *k;
2576 struct smp_irk *irk;
2577 u8 addr_type;
2578
2579 if (type == BDADDR_BREDR) {
2580 if (hci_find_link_key(hdev, bdaddr))
2581 return true;
2582 return false;
2583 }
2584
2585 /* Convert to HCI addr type which struct smp_ltk uses */
2586 if (type == BDADDR_LE_PUBLIC)
2587 addr_type = ADDR_LE_DEV_PUBLIC;
2588 else
2589 addr_type = ADDR_LE_DEV_RANDOM;
2590
2591 irk = hci_get_irk(hdev, bdaddr, addr_type);
2592 if (irk) {
2593 bdaddr = &irk->bdaddr;
2594 addr_type = irk->addr_type;
2595 }
2596
2597 rcu_read_lock();
2598 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2599 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2600 rcu_read_unlock();
2601 return true;
2602 }
2603 }
2604 rcu_read_unlock();
2605
2606 return false;
2607 }
2608
2609 /* HCI command timer function */
2610 static void hci_cmd_timeout(struct work_struct *work)
2611 {
2612 struct hci_dev *hdev = container_of(work, struct hci_dev,
2613 cmd_timer.work);
2614
2615 if (hdev->sent_cmd) {
2616 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2617 u16 opcode = __le16_to_cpu(sent->opcode);
2618
2619 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2620 } else {
2621 BT_ERR("%s command tx timeout", hdev->name);
2622 }
2623
2624 atomic_set(&hdev->cmd_cnt, 1);
2625 queue_work(hdev->workqueue, &hdev->cmd_work);
2626 }
2627
2628 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2629 bdaddr_t *bdaddr, u8 bdaddr_type)
2630 {
2631 struct oob_data *data;
2632
2633 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2634 if (bacmp(bdaddr, &data->bdaddr) != 0)
2635 continue;
2636 if (data->bdaddr_type != bdaddr_type)
2637 continue;
2638 return data;
2639 }
2640
2641 return NULL;
2642 }
2643
2644 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2645 u8 bdaddr_type)
2646 {
2647 struct oob_data *data;
2648
2649 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2650 if (!data)
2651 return -ENOENT;
2652
2653 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2654
2655 list_del(&data->list);
2656 kfree(data);
2657
2658 return 0;
2659 }
2660
2661 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2662 {
2663 struct oob_data *data, *n;
2664
2665 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2666 list_del(&data->list);
2667 kfree(data);
2668 }
2669 }
2670
2671 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2672 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2673 u8 *hash256, u8 *rand256)
2674 {
2675 struct oob_data *data;
2676
2677 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2678 if (!data) {
2679 data = kmalloc(sizeof(*data), GFP_KERNEL);
2680 if (!data)
2681 return -ENOMEM;
2682
2683 bacpy(&data->bdaddr, bdaddr);
2684 data->bdaddr_type = bdaddr_type;
2685 list_add(&data->list, &hdev->remote_oob_data);
2686 }
2687
2688 if (hash192 && rand192) {
2689 memcpy(data->hash192, hash192, sizeof(data->hash192));
2690 memcpy(data->rand192, rand192, sizeof(data->rand192));
2691 if (hash256 && rand256)
2692 data->present = 0x03;
2693 } else {
2694 memset(data->hash192, 0, sizeof(data->hash192));
2695 memset(data->rand192, 0, sizeof(data->rand192));
2696 if (hash256 && rand256)
2697 data->present = 0x02;
2698 else
2699 data->present = 0x00;
2700 }
2701
2702 if (hash256 && rand256) {
2703 memcpy(data->hash256, hash256, sizeof(data->hash256));
2704 memcpy(data->rand256, rand256, sizeof(data->rand256));
2705 } else {
2706 memset(data->hash256, 0, sizeof(data->hash256));
2707 memset(data->rand256, 0, sizeof(data->rand256));
2708 if (hash192 && rand192)
2709 data->present = 0x01;
2710 }
2711
2712 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2713
2714 return 0;
2715 }
2716
2717 /* This function requires the caller holds hdev->lock */
2718 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2719 {
2720 struct adv_info *adv_instance;
2721
2722 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2723 if (adv_instance->instance == instance)
2724 return adv_instance;
2725 }
2726
2727 return NULL;
2728 }
2729
2730 /* This function requires the caller holds hdev->lock */
2731 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2732 struct adv_info *cur_instance;
2733
2734 cur_instance = hci_find_adv_instance(hdev, instance);
2735 if (!cur_instance)
2736 return NULL;
2737
2738 if (cur_instance == list_last_entry(&hdev->adv_instances,
2739 struct adv_info, list))
2740 return list_first_entry(&hdev->adv_instances,
2741 struct adv_info, list);
2742 else
2743 return list_next_entry(cur_instance, list);
2744 }
2745
2746 /* This function requires the caller holds hdev->lock */
2747 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2748 {
2749 struct adv_info *adv_instance;
2750
2751 adv_instance = hci_find_adv_instance(hdev, instance);
2752 if (!adv_instance)
2753 return -ENOENT;
2754
2755 BT_DBG("%s removing %dMR", hdev->name, instance);
2756
2757 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2758 cancel_delayed_work(&hdev->adv_instance_expire);
2759 hdev->adv_instance_timeout = 0;
2760 }
2761
2762 list_del(&adv_instance->list);
2763 kfree(adv_instance);
2764
2765 hdev->adv_instance_cnt--;
2766
2767 return 0;
2768 }
2769
2770 /* This function requires the caller holds hdev->lock */
2771 void hci_adv_instances_clear(struct hci_dev *hdev)
2772 {
2773 struct adv_info *adv_instance, *n;
2774
2775 if (hdev->adv_instance_timeout) {
2776 cancel_delayed_work(&hdev->adv_instance_expire);
2777 hdev->adv_instance_timeout = 0;
2778 }
2779
2780 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2781 list_del(&adv_instance->list);
2782 kfree(adv_instance);
2783 }
2784
2785 hdev->adv_instance_cnt = 0;
2786 }
2787
2788 /* This function requires the caller holds hdev->lock */
2789 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2790 u16 adv_data_len, u8 *adv_data,
2791 u16 scan_rsp_len, u8 *scan_rsp_data,
2792 u16 timeout, u16 duration)
2793 {
2794 struct adv_info *adv_instance;
2795
2796 adv_instance = hci_find_adv_instance(hdev, instance);
2797 if (adv_instance) {
2798 memset(adv_instance->adv_data, 0,
2799 sizeof(adv_instance->adv_data));
2800 memset(adv_instance->scan_rsp_data, 0,
2801 sizeof(adv_instance->scan_rsp_data));
2802 } else {
2803 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2804 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2805 return -EOVERFLOW;
2806
2807 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2808 if (!adv_instance)
2809 return -ENOMEM;
2810
2811 adv_instance->pending = true;
2812 adv_instance->instance = instance;
2813 list_add(&adv_instance->list, &hdev->adv_instances);
2814 hdev->adv_instance_cnt++;
2815 }
2816
2817 adv_instance->flags = flags;
2818 adv_instance->adv_data_len = adv_data_len;
2819 adv_instance->scan_rsp_len = scan_rsp_len;
2820
2821 if (adv_data_len)
2822 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2823
2824 if (scan_rsp_len)
2825 memcpy(adv_instance->scan_rsp_data,
2826 scan_rsp_data, scan_rsp_len);
2827
2828 adv_instance->timeout = timeout;
2829 adv_instance->remaining_time = timeout;
2830
2831 if (duration == 0)
2832 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2833 else
2834 adv_instance->duration = duration;
2835
2836 BT_DBG("%s for %dMR", hdev->name, instance);
2837
2838 return 0;
2839 }
2840
2841 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2842 bdaddr_t *bdaddr, u8 type)
2843 {
2844 struct bdaddr_list *b;
2845
2846 list_for_each_entry(b, bdaddr_list, list) {
2847 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2848 return b;
2849 }
2850
2851 return NULL;
2852 }
2853
2854 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2855 {
2856 struct list_head *p, *n;
2857
2858 list_for_each_safe(p, n, bdaddr_list) {
2859 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2860
2861 list_del(p);
2862 kfree(b);
2863 }
2864 }
2865
2866 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2867 {
2868 struct bdaddr_list *entry;
2869
2870 if (!bacmp(bdaddr, BDADDR_ANY))
2871 return -EBADF;
2872
2873 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2874 return -EEXIST;
2875
2876 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2877 if (!entry)
2878 return -ENOMEM;
2879
2880 bacpy(&entry->bdaddr, bdaddr);
2881 entry->bdaddr_type = type;
2882
2883 list_add(&entry->list, list);
2884
2885 return 0;
2886 }
2887
2888 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2889 {
2890 struct bdaddr_list *entry;
2891
2892 if (!bacmp(bdaddr, BDADDR_ANY)) {
2893 hci_bdaddr_list_clear(list);
2894 return 0;
2895 }
2896
2897 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2898 if (!entry)
2899 return -ENOENT;
2900
2901 list_del(&entry->list);
2902 kfree(entry);
2903
2904 return 0;
2905 }
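/* Usage sketch (illustrative): the blacklist/whitelist heads initialised
 * in hci_alloc_dev() below are plain bdaddr lists, for example
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *
 * where -EEXIST indicates the entry was already present.
 */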
2906
2907 /* This function requires the caller holds hdev->lock */
2908 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2909 bdaddr_t *addr, u8 addr_type)
2910 {
2911 struct hci_conn_params *params;
2912
2913 list_for_each_entry(params, &hdev->le_conn_params, list) {
2914 if (bacmp(&params->addr, addr) == 0 &&
2915 params->addr_type == addr_type) {
2916 return params;
2917 }
2918 }
2919
2920 return NULL;
2921 }
2922
2923 /* This function requires the caller holds hdev->lock */
2924 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2925 bdaddr_t *addr, u8 addr_type)
2926 {
2927 struct hci_conn_params *param;
2928
2929 list_for_each_entry(param, list, action) {
2930 if (bacmp(&param->addr, addr) == 0 &&
2931 param->addr_type == addr_type)
2932 return param;
2933 }
2934
2935 return NULL;
2936 }
2937
2938 /* This function requires the caller holds hdev->lock */
2939 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2940 bdaddr_t *addr,
2941 u8 addr_type)
2942 {
2943 struct hci_conn_params *param;
2944
2945 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2946 if (bacmp(&param->addr, addr) == 0 &&
2947 param->addr_type == addr_type &&
2948 param->explicit_connect)
2949 return param;
2950 }
2951
2952 return NULL;
2953 }
2954
2955 /* This function requires the caller holds hdev->lock */
2956 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2957 bdaddr_t *addr, u8 addr_type)
2958 {
2959 struct hci_conn_params *params;
2960
2961 params = hci_conn_params_lookup(hdev, addr, addr_type);
2962 if (params)
2963 return params;
2964
2965 params = kzalloc(sizeof(*params), GFP_KERNEL);
2966 if (!params) {
2967 BT_ERR("Out of memory");
2968 return NULL;
2969 }
2970
2971 bacpy(&params->addr, addr);
2972 params->addr_type = addr_type;
2973
2974 list_add(&params->list, &hdev->le_conn_params);
2975 INIT_LIST_HEAD(&params->action);
2976
2977 params->conn_min_interval = hdev->le_conn_min_interval;
2978 params->conn_max_interval = hdev->le_conn_max_interval;
2979 params->conn_latency = hdev->le_conn_latency;
2980 params->supervision_timeout = hdev->le_supv_timeout;
2981 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2982
2983 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2984
2985 return params;
2986 }
2987
2988 static void hci_conn_params_free(struct hci_conn_params *params)
2989 {
2990 if (params->conn) {
2991 hci_conn_drop(params->conn);
2992 hci_conn_put(params->conn);
2993 }
2994
2995 list_del(&params->action);
2996 list_del(&params->list);
2997 kfree(params);
2998 }
2999
3000 /* This function requires the caller holds hdev->lock */
3001 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3002 {
3003 struct hci_conn_params *params;
3004
3005 params = hci_conn_params_lookup(hdev, addr, addr_type);
3006 if (!params)
3007 return;
3008
3009 hci_conn_params_free(params);
3010
3011 hci_update_background_scan(hdev);
3012
3013 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3014 }
3015
3016 /* This function requires the caller holds hdev->lock */
3017 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3018 {
3019 struct hci_conn_params *params, *tmp;
3020
3021 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3022 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3023 continue;
3024
3025		/* If trying to establish a one-time connection to a disabled
3026		 * device, leave the params, but mark them as just once.
3027 */
3028 if (params->explicit_connect) {
3029 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3030 continue;
3031 }
3032
3033 list_del(&params->list);
3034 kfree(params);
3035 }
3036
3037 BT_DBG("All LE disabled connection parameters were removed");
3038 }
3039
3040 /* This function requires the caller holds hdev->lock */
3041 void hci_conn_params_clear_all(struct hci_dev *hdev)
3042 {
3043 struct hci_conn_params *params, *tmp;
3044
3045 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3046 hci_conn_params_free(params);
3047
3048 hci_update_background_scan(hdev);
3049
3050 BT_DBG("All LE connection parameters were removed");
3051 }
3052
3053 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3054 {
3055 if (status) {
3056 BT_ERR("Failed to start inquiry: status %d", status);
3057
3058 hci_dev_lock(hdev);
3059 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3060 hci_dev_unlock(hdev);
3061 return;
3062 }
3063 }
3064
3065 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3066 u16 opcode)
3067 {
3068 /* General inquiry access code (GIAC) */
3069 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3070 struct hci_cp_inquiry cp;
3071 int err;
3072
3073 if (status) {
3074 BT_ERR("Failed to disable LE scanning: status %d", status);
3075 return;
3076 }
3077
3078 hdev->discovery.scan_start = 0;
3079
3080 switch (hdev->discovery.type) {
3081 case DISCOV_TYPE_LE:
3082 hci_dev_lock(hdev);
3083 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3084 hci_dev_unlock(hdev);
3085 break;
3086
3087 case DISCOV_TYPE_INTERLEAVED:
3088 hci_dev_lock(hdev);
3089
3090 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3091 &hdev->quirks)) {
3092 /* If we were running LE only scan, change discovery
3093 * state. If we were running both LE and BR/EDR inquiry
3094 * simultaneously, and BR/EDR inquiry is already
3095 * finished, stop discovery, otherwise BR/EDR inquiry
3096 * will stop discovery when finished. If we will resolve
3097 * remote device name, do not change discovery state.
3098 */
3099 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3100 hdev->discovery.state != DISCOVERY_RESOLVING)
3101 hci_discovery_set_state(hdev,
3102 DISCOVERY_STOPPED);
3103 } else {
3104 struct hci_request req;
3105
3106 hci_inquiry_cache_flush(hdev);
3107
3108 hci_req_init(&req, hdev);
3109
3110 memset(&cp, 0, sizeof(cp));
3111 memcpy(&cp.lap, lap, sizeof(cp.lap));
3112 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3113 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3114
3115 err = hci_req_run(&req, inquiry_complete);
3116 if (err) {
3117 BT_ERR("Inquiry request failed: err %d", err);
3118 hci_discovery_set_state(hdev,
3119 DISCOVERY_STOPPED);
3120 }
3121 }
3122
3123 hci_dev_unlock(hdev);
3124 break;
3125 }
3126 }
3127
3128 static void le_scan_disable_work(struct work_struct *work)
3129 {
3130 struct hci_dev *hdev = container_of(work, struct hci_dev,
3131 le_scan_disable.work);
3132 struct hci_request req;
3133 int err;
3134
3135 BT_DBG("%s", hdev->name);
3136
3137 cancel_delayed_work_sync(&hdev->le_scan_restart);
3138
3139 hci_req_init(&req, hdev);
3140
3141 hci_req_add_le_scan_disable(&req);
3142
3143 err = hci_req_run(&req, le_scan_disable_work_complete);
3144 if (err)
3145 BT_ERR("Disable LE scanning request failed: err %d", err);
3146 }
3147
3148 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3149 u16 opcode)
3150 {
3151 unsigned long timeout, duration, scan_start, now;
3152
3153 BT_DBG("%s", hdev->name);
3154
3155 if (status) {
3156 BT_ERR("Failed to restart LE scan: status %d", status);
3157 return;
3158 }
3159
3160 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3161 !hdev->discovery.scan_start)
3162 return;
3163
3164	/* When the scan was started, hdev->le_scan_disable was queued to
3165	 * run a full scan duration after scan_start. During scan restart
3166	 * that job was cancelled, so queue it again with the remaining
3167	 * timeout to make sure the scan does not run indefinitely.
3168	 */
3169 duration = hdev->discovery.scan_duration;
3170 scan_start = hdev->discovery.scan_start;
3171 now = jiffies;
3172 if (now - scan_start <= duration) {
3173 int elapsed;
3174
3175 if (now >= scan_start)
3176 elapsed = now - scan_start;
3177 else
3178 elapsed = ULONG_MAX - scan_start + now;
3179
3180 timeout = duration - elapsed;
3181 } else {
3182 timeout = 0;
3183 }
3184 queue_delayed_work(hdev->workqueue,
3185 &hdev->le_scan_disable, timeout);
3186 }
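/* Worked example for the re-queue above (illustrative): with a
 * scan_duration corresponding to 10240 ms and the restart completing
 * 4000 ms after scan_start, elapsed is 4000 ms and le_scan_disable is
 * queued again with a 6240 ms timeout, keeping the total scan time at
 * the requested duration.
 */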
3187
3188 static void le_scan_restart_work(struct work_struct *work)
3189 {
3190 struct hci_dev *hdev = container_of(work, struct hci_dev,
3191 le_scan_restart.work);
3192 struct hci_request req;
3193 struct hci_cp_le_set_scan_enable cp;
3194 int err;
3195
3196 BT_DBG("%s", hdev->name);
3197
3198 /* If controller is not scanning we are done. */
3199 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3200 return;
3201
3202 hci_req_init(&req, hdev);
3203
3204 hci_req_add_le_scan_disable(&req);
3205
3206 memset(&cp, 0, sizeof(cp));
3207 cp.enable = LE_SCAN_ENABLE;
3208 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3209 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3210
3211 err = hci_req_run(&req, le_scan_restart_work_complete);
3212 if (err)
3213 BT_ERR("Restart LE scan request failed: err %d", err);
3214 }
3215
3216 /* Copy the Identity Address of the controller.
3217 *
3218 * If the controller has a public BD_ADDR, then by default use that one.
3219  * If this is an LE-only controller without a public address, default to
3220 * the static random address.
3221 *
3222 * For debugging purposes it is possible to force controllers with a
3223 * public address to use the static random address instead.
3224 *
3225 * In case BR/EDR has been disabled on a dual-mode controller and
3226 * userspace has configured a static address, then that address
3227 * becomes the identity address instead of the public BR/EDR address.
3228 */
3229 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3230 u8 *bdaddr_type)
3231 {
3232 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3233 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3234 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3235 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3236 bacpy(bdaddr, &hdev->static_addr);
3237 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3238 } else {
3239 bacpy(bdaddr, &hdev->bdaddr);
3240 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3241 }
3242 }
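/* Example (illustrative): a dual-mode controller with a valid public
 * BD_ADDR normally yields ADDR_LE_DEV_PUBLIC here; the same controller
 * with BR/EDR disabled and a static address configured by userspace
 * yields that static address with ADDR_LE_DEV_RANDOM instead.
 */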
3243
3244 /* Alloc HCI device */
3245 struct hci_dev *hci_alloc_dev(void)
3246 {
3247 struct hci_dev *hdev;
3248
3249 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3250 if (!hdev)
3251 return NULL;
3252
3253 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3254 hdev->esco_type = (ESCO_HV1);
3255 hdev->link_mode = (HCI_LM_ACCEPT);
3256 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3257 hdev->io_capability = 0x03; /* No Input No Output */
3258 hdev->manufacturer = 0xffff; /* Default to internal use */
3259 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3260 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3261 hdev->adv_instance_cnt = 0;
3262 hdev->cur_adv_instance = 0x00;
3263 hdev->adv_instance_timeout = 0;
3264
3265 hdev->sniff_max_interval = 800;
3266 hdev->sniff_min_interval = 80;
3267
3268 hdev->le_adv_channel_map = 0x07;
3269 hdev->le_adv_min_interval = 0x0800;
3270 hdev->le_adv_max_interval = 0x0800;
3271 hdev->le_scan_interval = 0x0060;
3272 hdev->le_scan_window = 0x0030;
3273 hdev->le_conn_min_interval = 0x0028;
3274 hdev->le_conn_max_interval = 0x0038;
3275 hdev->le_conn_latency = 0x0000;
3276 hdev->le_supv_timeout = 0x002a;
3277 hdev->le_def_tx_len = 0x001b;
3278 hdev->le_def_tx_time = 0x0148;
3279 hdev->le_max_tx_len = 0x001b;
3280 hdev->le_max_tx_time = 0x0148;
3281 hdev->le_max_rx_len = 0x001b;
3282 hdev->le_max_rx_time = 0x0148;
3283
3284 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3285 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3286 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3287 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3288
3289 mutex_init(&hdev->lock);
3290 mutex_init(&hdev->req_lock);
3291
3292 INIT_LIST_HEAD(&hdev->mgmt_pending);
3293 INIT_LIST_HEAD(&hdev->blacklist);
3294 INIT_LIST_HEAD(&hdev->whitelist);
3295 INIT_LIST_HEAD(&hdev->uuids);
3296 INIT_LIST_HEAD(&hdev->link_keys);
3297 INIT_LIST_HEAD(&hdev->long_term_keys);
3298 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3299 INIT_LIST_HEAD(&hdev->remote_oob_data);
3300 INIT_LIST_HEAD(&hdev->le_white_list);
3301 INIT_LIST_HEAD(&hdev->le_conn_params);
3302 INIT_LIST_HEAD(&hdev->pend_le_conns);
3303 INIT_LIST_HEAD(&hdev->pend_le_reports);
3304 INIT_LIST_HEAD(&hdev->conn_hash.list);
3305 INIT_LIST_HEAD(&hdev->adv_instances);
3306
3307 INIT_WORK(&hdev->rx_work, hci_rx_work);
3308 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3309 INIT_WORK(&hdev->tx_work, hci_tx_work);
3310 INIT_WORK(&hdev->power_on, hci_power_on);
3311 INIT_WORK(&hdev->error_reset, hci_error_reset);
3312
3313 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3314 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3315 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3316 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3317 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3318
3319 skb_queue_head_init(&hdev->rx_q);
3320 skb_queue_head_init(&hdev->cmd_q);
3321 skb_queue_head_init(&hdev->raw_q);
3322
3323 init_waitqueue_head(&hdev->req_wait_q);
3324
3325 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3326
3327 hci_init_sysfs(hdev);
3328 discovery_init(hdev);
3329
3330 return hdev;
3331 }
3332 EXPORT_SYMBOL(hci_alloc_dev);
3333
3334 /* Free HCI device */
3335 void hci_free_dev(struct hci_dev *hdev)
3336 {
3337 /* will free via device release */
3338 put_device(&hdev->dev);
3339 }
3340 EXPORT_SYMBOL(hci_free_dev);
3341
3342 /* Register HCI device */
3343 int hci_register_dev(struct hci_dev *hdev)
3344 {
3345 int id, error;
3346
3347 if (!hdev->open || !hdev->close || !hdev->send)
3348 return -EINVAL;
3349
3350 /* Do not allow HCI_AMP devices to register at index 0,
3351 * so the index can be used as the AMP controller ID.
3352 */
3353 switch (hdev->dev_type) {
3354 case HCI_BREDR:
3355 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3356 break;
3357 case HCI_AMP:
3358 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3359 break;
3360 default:
3361 return -EINVAL;
3362 }
3363
3364 if (id < 0)
3365 return id;
3366
3367 sprintf(hdev->name, "hci%d", id);
3368 hdev->id = id;
3369
3370 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3371
3372 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3373 WQ_MEM_RECLAIM, 1, hdev->name);
3374 if (!hdev->workqueue) {
3375 error = -ENOMEM;
3376 goto err;
3377 }
3378
3379 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3380 WQ_MEM_RECLAIM, 1, hdev->name);
3381 if (!hdev->req_workqueue) {
3382 destroy_workqueue(hdev->workqueue);
3383 error = -ENOMEM;
3384 goto err;
3385 }
3386
3387 if (!IS_ERR_OR_NULL(bt_debugfs))
3388 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3389
3390 dev_set_name(&hdev->dev, "%s", hdev->name);
3391
3392 error = device_add(&hdev->dev);
3393 if (error < 0)
3394 goto err_wqueue;
3395
3396 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3397 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3398 hdev);
3399 if (hdev->rfkill) {
3400 if (rfkill_register(hdev->rfkill) < 0) {
3401 rfkill_destroy(hdev->rfkill);
3402 hdev->rfkill = NULL;
3403 }
3404 }
3405
3406 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3407 hci_dev_set_flag(hdev, HCI_RFKILLED);
3408
3409 hci_dev_set_flag(hdev, HCI_SETUP);
3410 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3411
3412 if (hdev->dev_type == HCI_BREDR) {
3413		/* Assume BR/EDR support until proven otherwise (such as
3414		 * through reading supported features during init).
3415		 */
3416 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3417 }
3418
3419 write_lock(&hci_dev_list_lock);
3420 list_add(&hdev->list, &hci_dev_list);
3421 write_unlock(&hci_dev_list_lock);
3422
3423 /* Devices that are marked for raw-only usage are unconfigured
3424 * and should not be included in normal operation.
3425 */
3426 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3427 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3428
3429 hci_notify(hdev, HCI_DEV_REG);
3430 hci_dev_hold(hdev);
3431
3432 queue_work(hdev->req_workqueue, &hdev->power_on);
3433
3434 return id;
3435
3436 err_wqueue:
3437 destroy_workqueue(hdev->workqueue);
3438 destroy_workqueue(hdev->req_workqueue);
3439 err:
3440 ida_simple_remove(&hci_index_ida, hdev->id);
3441
3442 return error;
3443 }
3444 EXPORT_SYMBOL(hci_register_dev);
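/* Driver-side usage sketch (illustrative, loosely following drivers such
 * as btusb; the xx_* callbacks and priv pointer are placeholders and
 * error handling is abbreviated):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = xx_open;
 *	hdev->close = xx_close;
 *	hdev->send = xx_send;
 *	hci_set_drvdata(hdev, priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */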
3445
3446 /* Unregister HCI device */
3447 void hci_unregister_dev(struct hci_dev *hdev)
3448 {
3449 int id;
3450
3451 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3452
3453 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3454
3455 id = hdev->id;
3456
3457 write_lock(&hci_dev_list_lock);
3458 list_del(&hdev->list);
3459 write_unlock(&hci_dev_list_lock);
3460
3461 hci_dev_do_close(hdev);
3462
3463 cancel_work_sync(&hdev->power_on);
3464
3465 if (!test_bit(HCI_INIT, &hdev->flags) &&
3466 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3467 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3468 hci_dev_lock(hdev);
3469 mgmt_index_removed(hdev);
3470 hci_dev_unlock(hdev);
3471 }
3472
3473 /* mgmt_index_removed should take care of emptying the
3474 * pending list */
3475 BUG_ON(!list_empty(&hdev->mgmt_pending));
3476
3477 hci_notify(hdev, HCI_DEV_UNREG);
3478
3479 if (hdev->rfkill) {
3480 rfkill_unregister(hdev->rfkill);
3481 rfkill_destroy(hdev->rfkill);
3482 }
3483
3484 device_del(&hdev->dev);
3485
3486 debugfs_remove_recursive(hdev->debugfs);
3487
3488 destroy_workqueue(hdev->workqueue);
3489 destroy_workqueue(hdev->req_workqueue);
3490
3491 hci_dev_lock(hdev);
3492 hci_bdaddr_list_clear(&hdev->blacklist);
3493 hci_bdaddr_list_clear(&hdev->whitelist);
3494 hci_uuids_clear(hdev);
3495 hci_link_keys_clear(hdev);
3496 hci_smp_ltks_clear(hdev);
3497 hci_smp_irks_clear(hdev);
3498 hci_remote_oob_data_clear(hdev);
3499 hci_adv_instances_clear(hdev);
3500 hci_bdaddr_list_clear(&hdev->le_white_list);
3501 hci_conn_params_clear_all(hdev);
3502 hci_discovery_filter_clear(hdev);
3503 hci_dev_unlock(hdev);
3504
3505 hci_dev_put(hdev);
3506
3507 ida_simple_remove(&hci_index_ida, id);
3508 }
3509 EXPORT_SYMBOL(hci_unregister_dev);
3510
3511 /* Suspend HCI device */
3512 int hci_suspend_dev(struct hci_dev *hdev)
3513 {
3514 hci_notify(hdev, HCI_DEV_SUSPEND);
3515 return 0;
3516 }
3517 EXPORT_SYMBOL(hci_suspend_dev);
3518
3519 /* Resume HCI device */
3520 int hci_resume_dev(struct hci_dev *hdev)
3521 {
3522 hci_notify(hdev, HCI_DEV_RESUME);
3523 return 0;
3524 }
3525 EXPORT_SYMBOL(hci_resume_dev);
3526
3527 /* Reset HCI device */
3528 int hci_reset_dev(struct hci_dev *hdev)
3529 {
3530 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3531 struct sk_buff *skb;
3532
3533 skb = bt_skb_alloc(3, GFP_ATOMIC);
3534 if (!skb)
3535 return -ENOMEM;
3536
3537 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3538 memcpy(skb_put(skb, 3), hw_err, 3);
3539
3540 /* Send Hardware Error to upper stack */
3541 return hci_recv_frame(hdev, skb);
3542 }
3543 EXPORT_SYMBOL(hci_reset_dev);
3544
3545 /* Receive frame from HCI drivers */
3546 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3547 {
3548 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3549 && !test_bit(HCI_INIT, &hdev->flags))) {
3550 kfree_skb(skb);
3551 return -ENXIO;
3552 }
3553
3554 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3555 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3556 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3557 kfree_skb(skb);
3558 return -EINVAL;
3559 }
3560
3561 /* Incoming skb */
3562 bt_cb(skb)->incoming = 1;
3563
3564 /* Time stamp */
3565 __net_timestamp(skb);
3566
3567 skb_queue_tail(&hdev->rx_q, skb);
3568 queue_work(hdev->workqueue, &hdev->rx_work);
3569
3570 return 0;
3571 }
3572 EXPORT_SYMBOL(hci_recv_frame);
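/* Driver-side usage sketch for the receive path (illustrative): drivers
 * allocate a bt skb, tag its packet type and hand it over, e.g. for an
 * event packet of len bytes:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	memcpy(skb_put(skb, len), data, len);
 *	hci_recv_frame(hdev, skb);
 */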
3573
3574 /* Receive diagnostic message from HCI drivers */
3575 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3576 {
3577 /* Mark as diagnostic packet */
3578 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3579
3580 /* Time stamp */
3581 __net_timestamp(skb);
3582
3583 skb_queue_tail(&hdev->rx_q, skb);
3584 queue_work(hdev->workqueue, &hdev->rx_work);
3585
3586 return 0;
3587 }
3588 EXPORT_SYMBOL(hci_recv_diag);
3589
3590 /* ---- Interface to upper protocols ---- */
3591
3592 int hci_register_cb(struct hci_cb *cb)
3593 {
3594 BT_DBG("%p name %s", cb, cb->name);
3595
3596 mutex_lock(&hci_cb_list_lock);
3597 list_add_tail(&cb->list, &hci_cb_list);
3598 mutex_unlock(&hci_cb_list_lock);
3599
3600 return 0;
3601 }
3602 EXPORT_SYMBOL(hci_register_cb);
3603
3604 int hci_unregister_cb(struct hci_cb *cb)
3605 {
3606 BT_DBG("%p name %s", cb, cb->name);
3607
3608 mutex_lock(&hci_cb_list_lock);
3609 list_del(&cb->list);
3610 mutex_unlock(&hci_cb_list_lock);
3611
3612 return 0;
3613 }
3614 EXPORT_SYMBOL(hci_unregister_cb);
3615
3616 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3617 {
3618 int err;
3619
3620 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3621
3622 /* Time stamp */
3623 __net_timestamp(skb);
3624
3625 /* Send copy to monitor */
3626 hci_send_to_monitor(hdev, skb);
3627
3628 if (atomic_read(&hdev->promisc)) {
3629 /* Send copy to the sockets */
3630 hci_send_to_sock(hdev, skb);
3631 }
3632
3633 /* Get rid of skb owner, prior to sending to the driver. */
3634 skb_orphan(skb);
3635
3636 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3637 kfree_skb(skb);
3638 return;
3639 }
3640
3641 err = hdev->send(hdev, skb);
3642 if (err < 0) {
3643 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3644 kfree_skb(skb);
3645 }
3646 }
3647
3648 /* Send HCI command */
3649 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3650 const void *param)
3651 {
3652 struct sk_buff *skb;
3653
3654 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3655
3656 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3657 if (!skb) {
3658 BT_ERR("%s no memory for command", hdev->name);
3659 return -ENOMEM;
3660 }
3661
3662 /* Stand-alone HCI commands must be flagged as
3663 * single-command requests.
3664 */
3665 bt_cb(skb)->req.start = true;
3666
3667 skb_queue_tail(&hdev->cmd_q, skb);
3668 queue_work(hdev->workqueue, &hdev->cmd_work);
3669
3670 return 0;
3671 }
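/* Usage sketch (illustrative): a stand-alone command with no parameters,
 * such as HCI Reset, is sent as
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * and completion is reported asynchronously through the event path.
 */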
3672
3673 /* Get data from the previously sent command */
3674 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3675 {
3676 struct hci_command_hdr *hdr;
3677
3678 if (!hdev->sent_cmd)
3679 return NULL;
3680
3681 hdr = (void *) hdev->sent_cmd->data;
3682
3683 if (hdr->opcode != cpu_to_le16(opcode))
3684 return NULL;
3685
3686 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3687
3688 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3689 }
3690
3691 /* Send HCI command and wait for Command Complete event */
3692 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3693 const void *param, u32 timeout)
3694 {
3695 struct sk_buff *skb;
3696
3697 if (!test_bit(HCI_UP, &hdev->flags))
3698 return ERR_PTR(-ENETDOWN);
3699
3700 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3701
3702 hci_req_lock(hdev);
3703 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3704 hci_req_unlock(hdev);
3705
3706 return skb;
3707 }
3708 EXPORT_SYMBOL(hci_cmd_sync);
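/* Usage sketch (illustrative): the returned skb holds the Command
 * Complete parameters and must be freed by the caller:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */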
3709
3710 /* Send ACL data */
3711 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3712 {
3713 struct hci_acl_hdr *hdr;
3714 int len = skb->len;
3715
3716 skb_push(skb, HCI_ACL_HDR_SIZE);
3717 skb_reset_transport_header(skb);
3718 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3719 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3720 hdr->dlen = cpu_to_le16(len);
3721 }
3722
3723 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3724 struct sk_buff *skb, __u16 flags)
3725 {
3726 struct hci_conn *conn = chan->conn;
3727 struct hci_dev *hdev = conn->hdev;
3728 struct sk_buff *list;
3729
3730 skb->len = skb_headlen(skb);
3731 skb->data_len = 0;
3732
3733 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3734
3735 switch (hdev->dev_type) {
3736 case HCI_BREDR:
3737 hci_add_acl_hdr(skb, conn->handle, flags);
3738 break;
3739 case HCI_AMP:
3740 hci_add_acl_hdr(skb, chan->handle, flags);
3741 break;
3742 default:
3743 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3744 return;
3745 }
3746
3747 list = skb_shinfo(skb)->frag_list;
3748 if (!list) {
3749 /* Non fragmented */
3750 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3751
3752 skb_queue_tail(queue, skb);
3753 } else {
3754 /* Fragmented */
3755 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3756
3757 skb_shinfo(skb)->frag_list = NULL;
3758
3759 /* Queue all fragments atomically. We need to use spin_lock_bh
3760		 * here because of 6LoWPAN links, where this function is
3761		 * called from softirq context and using a normal spin lock
3762		 * could cause deadlocks.
3763 */
3764 spin_lock_bh(&queue->lock);
3765
3766 __skb_queue_tail(queue, skb);
3767
3768 flags &= ~ACL_START;
3769 flags |= ACL_CONT;
3770 do {
3771 skb = list; list = list->next;
3772
3773 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3774 hci_add_acl_hdr(skb, conn->handle, flags);
3775
3776 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3777
3778 __skb_queue_tail(queue, skb);
3779 } while (list);
3780
3781 spin_unlock_bh(&queue->lock);
3782 }
3783 }
3784
3785 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3786 {
3787 struct hci_dev *hdev = chan->conn->hdev;
3788
3789 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3790
3791 hci_queue_acl(chan, &chan->data_q, skb, flags);
3792
3793 queue_work(hdev->workqueue, &hdev->tx_work);
3794 }
3795
3796 /* Send SCO data */
3797 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3798 {
3799 struct hci_dev *hdev = conn->hdev;
3800 struct hci_sco_hdr hdr;
3801
3802 BT_DBG("%s len %d", hdev->name, skb->len);
3803
3804 hdr.handle = cpu_to_le16(conn->handle);
3805 hdr.dlen = skb->len;
3806
3807 skb_push(skb, HCI_SCO_HDR_SIZE);
3808 skb_reset_transport_header(skb);
3809 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3810
3811 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3812
3813 skb_queue_tail(&conn->data_q, skb);
3814 queue_work(hdev->workqueue, &hdev->tx_work);
3815 }
3816
3817 /* ---- HCI TX task (outgoing data) ---- */
3818
3819 /* HCI Connection scheduler */
3820 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3821 int *quote)
3822 {
3823 struct hci_conn_hash *h = &hdev->conn_hash;
3824 struct hci_conn *conn = NULL, *c;
3825 unsigned int num = 0, min = ~0;
3826
3827 /* We don't have to lock device here. Connections are always
3828 * added and removed with TX task disabled. */
3829
3830 rcu_read_lock();
3831
3832 list_for_each_entry_rcu(c, &h->list, list) {
3833 if (c->type != type || skb_queue_empty(&c->data_q))
3834 continue;
3835
3836 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3837 continue;
3838
3839 num++;
3840
3841 if (c->sent < min) {
3842 min = c->sent;
3843 conn = c;
3844 }
3845
3846 if (hci_conn_num(hdev, type) == num)
3847 break;
3848 }
3849
3850 rcu_read_unlock();
3851
3852 if (conn) {
3853 int cnt, q;
3854
3855 switch (conn->type) {
3856 case ACL_LINK:
3857 cnt = hdev->acl_cnt;
3858 break;
3859 case SCO_LINK:
3860 case ESCO_LINK:
3861 cnt = hdev->sco_cnt;
3862 break;
3863 case LE_LINK:
3864 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3865 break;
3866 default:
3867 cnt = 0;
3868 BT_ERR("Unknown link type");
3869 }
3870
3871 q = cnt / num;
3872 *quote = q ? q : 1;
3873 } else
3874 *quote = 0;
3875
3876 BT_DBG("conn %p quote %d", conn, *quote);
3877 return conn;
3878 }
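/* Example of the quota calculation above (illustrative): with three SCO
 * connections queuing data and hdev->sco_cnt == 6, each connection gets
 * a quote of 2 packets per round; with sco_cnt == 2 the quote is clamped
 * to a minimum of 1.
 */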
3879
3880 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3881 {
3882 struct hci_conn_hash *h = &hdev->conn_hash;
3883 struct hci_conn *c;
3884
3885 BT_ERR("%s link tx timeout", hdev->name);
3886
3887 rcu_read_lock();
3888
3889 /* Kill stalled connections */
3890 list_for_each_entry_rcu(c, &h->list, list) {
3891 if (c->type == type && c->sent) {
3892 BT_ERR("%s killing stalled connection %pMR",
3893 hdev->name, &c->dst);
3894 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3895 }
3896 }
3897
3898 rcu_read_unlock();
3899 }
3900
3901 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3902 int *quote)
3903 {
3904 struct hci_conn_hash *h = &hdev->conn_hash;
3905 struct hci_chan *chan = NULL;
3906 unsigned int num = 0, min = ~0, cur_prio = 0;
3907 struct hci_conn *conn;
3908 int cnt, q, conn_num = 0;
3909
3910 BT_DBG("%s", hdev->name);
3911
3912 rcu_read_lock();
3913
3914 list_for_each_entry_rcu(conn, &h->list, list) {
3915 struct hci_chan *tmp;
3916
3917 if (conn->type != type)
3918 continue;
3919
3920 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3921 continue;
3922
3923 conn_num++;
3924
3925 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3926 struct sk_buff *skb;
3927
3928 if (skb_queue_empty(&tmp->data_q))
3929 continue;
3930
3931 skb = skb_peek(&tmp->data_q);
3932 if (skb->priority < cur_prio)
3933 continue;
3934
3935 if (skb->priority > cur_prio) {
3936 num = 0;
3937 min = ~0;
3938 cur_prio = skb->priority;
3939 }
3940
3941 num++;
3942
3943 if (conn->sent < min) {
3944 min = conn->sent;
3945 chan = tmp;
3946 }
3947 }
3948
3949 if (hci_conn_num(hdev, type) == conn_num)
3950 break;
3951 }
3952
3953 rcu_read_unlock();
3954
3955 if (!chan)
3956 return NULL;
3957
3958 switch (chan->conn->type) {
3959 case ACL_LINK:
3960 cnt = hdev->acl_cnt;
3961 break;
3962 case AMP_LINK:
3963 cnt = hdev->block_cnt;
3964 break;
3965 case SCO_LINK:
3966 case ESCO_LINK:
3967 cnt = hdev->sco_cnt;
3968 break;
3969 case LE_LINK:
3970 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3971 break;
3972 default:
3973 cnt = 0;
3974 BT_ERR("Unknown link type");
3975 }
3976
3977 q = cnt / num;
3978 *quote = q ? q : 1;
3979 BT_DBG("chan %p quote %d", chan, *quote);
3980 return chan;
3981 }
3982
3983 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3984 {
3985 struct hci_conn_hash *h = &hdev->conn_hash;
3986 struct hci_conn *conn;
3987 int num = 0;
3988
3989 BT_DBG("%s", hdev->name);
3990
3991 rcu_read_lock();
3992
3993 list_for_each_entry_rcu(conn, &h->list, list) {
3994 struct hci_chan *chan;
3995
3996 if (conn->type != type)
3997 continue;
3998
3999 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4000 continue;
4001
4002 num++;
4003
4004 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4005 struct sk_buff *skb;
4006
4007 if (chan->sent) {
4008 chan->sent = 0;
4009 continue;
4010 }
4011
4012 if (skb_queue_empty(&chan->data_q))
4013 continue;
4014
4015 skb = skb_peek(&chan->data_q);
4016 if (skb->priority >= HCI_PRIO_MAX - 1)
4017 continue;
4018
4019 skb->priority = HCI_PRIO_MAX - 1;
4020
4021 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4022 skb->priority);
4023 }
4024
4025 if (hci_conn_num(hdev, type) == num)
4026 break;
4027 }
4028
4029 rcu_read_unlock();
4030
4031 }
4032
4033 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4034 {
4035 /* Calculate count of blocks used by this packet */
4036 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4037 }
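/* Example (illustrative): an ACL packet of 1024 bytes (4-byte ACL header
 * plus 1020 bytes of payload) on a controller with block_len == 510
 * occupies DIV_ROUND_UP(1020, 510) == 2 blocks.
 */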
4038
4039 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4040 {
4041 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4042 /* ACL tx timeout must be longer than maximum
4043 * link supervision timeout (40.9 seconds) */
4044 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4045 HCI_ACL_TX_TIMEOUT))
4046 hci_link_tx_to(hdev, ACL_LINK);
4047 }
4048 }
4049
4050 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4051 {
4052 unsigned int cnt = hdev->acl_cnt;
4053 struct hci_chan *chan;
4054 struct sk_buff *skb;
4055 int quote;
4056
4057 __check_timeout(hdev, cnt);
4058
4059 while (hdev->acl_cnt &&
4060 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4061 u32 priority = (skb_peek(&chan->data_q))->priority;
4062 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4063 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4064 skb->len, skb->priority);
4065
4066 /* Stop if priority has changed */
4067 if (skb->priority < priority)
4068 break;
4069
4070 skb = skb_dequeue(&chan->data_q);
4071
4072 hci_conn_enter_active_mode(chan->conn,
4073 bt_cb(skb)->force_active);
4074
4075 hci_send_frame(hdev, skb);
4076 hdev->acl_last_tx = jiffies;
4077
4078 hdev->acl_cnt--;
4079 chan->sent++;
4080 chan->conn->sent++;
4081 }
4082 }
4083
4084 if (cnt != hdev->acl_cnt)
4085 hci_prio_recalculate(hdev, ACL_LINK);
4086 }
4087
4088 static void hci_sched_acl_blk(struct hci_dev *hdev)
4089 {
4090 unsigned int cnt = hdev->block_cnt;
4091 struct hci_chan *chan;
4092 struct sk_buff *skb;
4093 int quote;
4094 u8 type;
4095
4096 __check_timeout(hdev, cnt);
4097
4098 BT_DBG("%s", hdev->name);
4099
4100 if (hdev->dev_type == HCI_AMP)
4101 type = AMP_LINK;
4102 else
4103 type = ACL_LINK;
4104
4105 while (hdev->block_cnt > 0 &&
4106 (chan = hci_chan_sent(hdev, type, &quote))) {
4107 u32 priority = (skb_peek(&chan->data_q))->priority;
4108 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4109 int blocks;
4110
4111 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4112 skb->len, skb->priority);
4113
4114 /* Stop if priority has changed */
4115 if (skb->priority < priority)
4116 break;
4117
4118 skb = skb_dequeue(&chan->data_q);
4119
4120 blocks = __get_blocks(hdev, skb);
4121 if (blocks > hdev->block_cnt)
4122 return;
4123
4124 hci_conn_enter_active_mode(chan->conn,
4125 bt_cb(skb)->force_active);
4126
4127 hci_send_frame(hdev, skb);
4128 hdev->acl_last_tx = jiffies;
4129
4130 hdev->block_cnt -= blocks;
4131 quote -= blocks;
4132
4133 chan->sent += blocks;
4134 chan->conn->sent += blocks;
4135 }
4136 }
4137
4138 if (cnt != hdev->block_cnt)
4139 hci_prio_recalculate(hdev, type);
4140 }
4141
4142 static void hci_sched_acl(struct hci_dev *hdev)
4143 {
4144 BT_DBG("%s", hdev->name);
4145
4146 /* No ACL link over BR/EDR controller */
4147 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4148 return;
4149
4150 /* No AMP link over AMP controller */
4151 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4152 return;
4153
4154 switch (hdev->flow_ctl_mode) {
4155 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4156 hci_sched_acl_pkt(hdev);
4157 break;
4158
4159 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4160 hci_sched_acl_blk(hdev);
4161 break;
4162 }
4163 }
4164
4165 /* Schedule SCO */
4166 static void hci_sched_sco(struct hci_dev *hdev)
4167 {
4168 struct hci_conn *conn;
4169 struct sk_buff *skb;
4170 int quote;
4171
4172 BT_DBG("%s", hdev->name);
4173
4174 if (!hci_conn_num(hdev, SCO_LINK))
4175 return;
4176
4177 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4178 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4179 BT_DBG("skb %p len %d", skb, skb->len);
4180 hci_send_frame(hdev, skb);
4181
4182 conn->sent++;
4183 if (conn->sent == ~0)
4184 conn->sent = 0;
4185 }
4186 }
4187 }
4188
4189 static void hci_sched_esco(struct hci_dev *hdev)
4190 {
4191 struct hci_conn *conn;
4192 struct sk_buff *skb;
4193 int quote;
4194
4195 BT_DBG("%s", hdev->name);
4196
4197 if (!hci_conn_num(hdev, ESCO_LINK))
4198 return;
4199
4200 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4201 &quote))) {
4202 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4203 BT_DBG("skb %p len %d", skb, skb->len);
4204 hci_send_frame(hdev, skb);
4205
4206 conn->sent++;
4207 if (conn->sent == ~0)
4208 conn->sent = 0;
4209 }
4210 }
4211 }
4212
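/* LE scheduler: if the controller reports no dedicated LE buffers
 * (le_pkts == 0), LE traffic shares the ACL credit pool, which is why
 * the remaining count is written back to either le_cnt or acl_cnt
 * below.
 */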
4213 static void hci_sched_le(struct hci_dev *hdev)
4214 {
4215 struct hci_chan *chan;
4216 struct sk_buff *skb;
4217 int quote, cnt, tmp;
4218
4219 BT_DBG("%s", hdev->name);
4220
4221 if (!hci_conn_num(hdev, LE_LINK))
4222 return;
4223
4224 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4225 /* LE tx timeout must be longer than maximum
4226 * link supervision timeout (40.9 seconds) */
4227 if (!hdev->le_cnt && hdev->le_pkts &&
4228 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4229 hci_link_tx_to(hdev, LE_LINK);
4230 }
4231
4232 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4233 tmp = cnt;
4234 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4235 u32 priority = (skb_peek(&chan->data_q))->priority;
4236 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4237 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4238 skb->len, skb->priority);
4239
4240 /* Stop if priority has changed */
4241 if (skb->priority < priority)
4242 break;
4243
4244 skb = skb_dequeue(&chan->data_q);
4245
4246 hci_send_frame(hdev, skb);
4247 hdev->le_last_tx = jiffies;
4248
4249 cnt--;
4250 chan->sent++;
4251 chan->conn->sent++;
4252 }
4253 }
4254
4255 if (hdev->le_pkts)
4256 hdev->le_cnt = cnt;
4257 else
4258 hdev->acl_cnt = cnt;
4259
4260 if (cnt != tmp)
4261 hci_prio_recalculate(hdev, LE_LINK);
4262 }
4263
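/* TX work item: runs the ACL, SCO, eSCO and LE schedulers unless the
 * device is in use by a user channel, then flushes any raw packets
 * queued on raw_q.
 */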
4264 static void hci_tx_work(struct work_struct *work)
4265 {
4266 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4267 struct sk_buff *skb;
4268
4269 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4270 hdev->sco_cnt, hdev->le_cnt);
4271
4272 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4273 /* Schedule queues and send frames to the HCI driver */
4274 hci_sched_acl(hdev);
4275 hci_sched_sco(hdev);
4276 hci_sched_esco(hdev);
4277 hci_sched_le(hdev);
4278 }
4279
4280 /* Send next queued raw (unknown type) packet */
4281 while ((skb = skb_dequeue(&hdev->raw_q)))
4282 hci_send_frame(hdev, skb);
4283 }
4284
4285 /* ----- HCI RX task (incoming data processing) ----- */
4286
4287 /* ACL data packet */
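/* The 16-bit handle field in the ACL header packs the 12-bit
 * connection handle together with the packet boundary and broadcast
 * flags; hci_handle() and hci_flags() split them apart below.
 */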
4288 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4289 {
4290 struct hci_acl_hdr *hdr = (void *) skb->data;
4291 struct hci_conn *conn;
4292 __u16 handle, flags;
4293
4294 skb_pull(skb, HCI_ACL_HDR_SIZE);
4295
4296 handle = __le16_to_cpu(hdr->handle);
4297 flags = hci_flags(handle);
4298 handle = hci_handle(handle);
4299
4300 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4301 handle, flags);
4302
4303 hdev->stat.acl_rx++;
4304
4305 hci_dev_lock(hdev);
4306 conn = hci_conn_hash_lookup_handle(hdev, handle);
4307 hci_dev_unlock(hdev);
4308
4309 if (conn) {
4310 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4311
4312 /* Send to upper protocol */
4313 l2cap_recv_acldata(conn, skb, flags);
4314 return;
4315 } else {
4316 BT_ERR("%s ACL packet for unknown connection handle %d",
4317 hdev->name, handle);
4318 }
4319
4320 kfree_skb(skb);
4321 }
4322
4323 /* SCO data packet */
4324 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4325 {
4326 struct hci_sco_hdr *hdr = (void *) skb->data;
4327 struct hci_conn *conn;
4328 __u16 handle;
4329
4330 skb_pull(skb, HCI_SCO_HDR_SIZE);
4331
4332 handle = __le16_to_cpu(hdr->handle);
4333
4334 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4335
4336 hdev->stat.sco_rx++;
4337
4338 hci_dev_lock(hdev);
4339 conn = hci_conn_hash_lookup_handle(hdev, handle);
4340 hci_dev_unlock(hdev);
4341
4342 if (conn) {
4343 /* Send to upper protocol */
4344 sco_recv_scodata(conn, skb);
4345 return;
4346 } else {
4347 BT_ERR("%s SCO packet for unknown connection handle %d",
4348 hdev->name, handle);
4349 }
4350
4351 kfree_skb(skb);
4352 }
4353
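/* Commands queued as part of a request are framed by the req.start
 * flag on the first command; if the head of cmd_q starts a new
 * request (or the queue is empty), the current request has no more
 * commands outstanding.
 */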
4354 static bool hci_req_is_complete(struct hci_dev *hdev)
4355 {
4356 struct sk_buff *skb;
4357
4358 skb = skb_peek(&hdev->cmd_q);
4359 if (!skb)
4360 return true;
4361
4362 return bt_cb(skb)->req.start;
4363 }
4364
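/* Put a fresh clone of the last sent command (unless it was
 * HCI_Reset) back at the head of cmd_q so it gets sent again.
 */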
4365 static void hci_resend_last(struct hci_dev *hdev)
4366 {
4367 struct hci_command_hdr *sent;
4368 struct sk_buff *skb;
4369 u16 opcode;
4370
4371 if (!hdev->sent_cmd)
4372 return;
4373
4374 sent = (void *) hdev->sent_cmd->data;
4375 opcode = __le16_to_cpu(sent->opcode);
4376 if (opcode == HCI_OP_RESET)
4377 return;
4378
4379 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4380 if (!skb)
4381 return;
4382
4383 skb_queue_head(&hdev->cmd_q, skb);
4384 queue_work(hdev->workqueue, &hdev->cmd_work);
4385 }
4386
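/* Decide whether the command that just completed finishes a whole
 * request and, if it does, hand back the completion callback the
 * request was built with (plain or skb-passing variant) so the caller
 * can invoke it.
 */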
4387 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4388 hci_req_complete_t *req_complete,
4389 hci_req_complete_skb_t *req_complete_skb)
4390 {
4391 struct sk_buff *skb;
4392 unsigned long flags;
4393
4394 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4395
4396 /* If the completed command doesn't match the last one that was
4397 * sent, we need to handle it specially.
4398 */
4399 if (!hci_sent_cmd_data(hdev, opcode)) {
4400 /* Some CSR-based controllers generate a spontaneous
4401 * reset complete event during init, and any pending
4402 * command will never be completed. In such a case we
4403 * need to resend whatever was the last sent
4404 * command.
4405 */
4406 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4407 hci_resend_last(hdev);
4408
4409 return;
4410 }
4411
4412 /* If the command succeeded and there are still more commands in
4413 * this request, the request is not yet complete.
4414 */
4415 if (!status && !hci_req_is_complete(hdev))
4416 return;
4417
4418 /* If this was the last command in a request, the completion
4419 * callback is found in hdev->sent_cmd instead of the
4420 * command queue (hdev->cmd_q).
4421 */
4422 if (bt_cb(hdev->sent_cmd)->req.complete) {
4423 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4424 return;
4425 }
4426
4427 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4428 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4429 return;
4430 }
4431
4432 /* Remove all pending commands belonging to this request */
4433 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4434 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4435 if (bt_cb(skb)->req.start) {
4436 __skb_queue_head(&hdev->cmd_q, skb);
4437 break;
4438 }
4439
4440 *req_complete = bt_cb(skb)->req.complete;
4441 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4442 kfree_skb(skb);
4443 }
4444 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4445 }
4446
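/* RX work item: every frame taken off rx_q is first mirrored to the
 * monitor socket (and to raw sockets when promiscuous mode is on),
 * then dispatched to the event, ACL or SCO handlers; frames are not
 * processed here for devices bound to a user channel.
 */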
4447 static void hci_rx_work(struct work_struct *work)
4448 {
4449 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4450 struct sk_buff *skb;
4451
4452 BT_DBG("%s", hdev->name);
4453
4454 while ((skb = skb_dequeue(&hdev->rx_q))) {
4455 /* Send copy to monitor */
4456 hci_send_to_monitor(hdev, skb);
4457
4458 if (atomic_read(&hdev->promisc)) {
4459 /* Send copy to the sockets */
4460 hci_send_to_sock(hdev, skb);
4461 }
4462
4463 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4464 kfree_skb(skb);
4465 continue;
4466 }
4467
4468 if (test_bit(HCI_INIT, &hdev->flags)) {
4469 /* Don't process data packets in this state. */
4470 switch (bt_cb(skb)->pkt_type) {
4471 case HCI_ACLDATA_PKT:
4472 case HCI_SCODATA_PKT:
4473 kfree_skb(skb);
4474 continue;
4475 }
4476 }
4477
4478 /* Process frame */
4479 switch (bt_cb(skb)->pkt_type) {
4480 case HCI_EVENT_PKT:
4481 BT_DBG("%s Event packet", hdev->name);
4482 hci_event_packet(hdev, skb);
4483 break;
4484
4485 case HCI_ACLDATA_PKT:
4486 BT_DBG("%s ACL data packet", hdev->name);
4487 hci_acldata_packet(hdev, skb);
4488 break;
4489
4490 case HCI_SCODATA_PKT:
4491 BT_DBG("%s SCO data packet", hdev->name);
4492 hci_scodata_packet(hdev, skb);
4493 break;
4494
4495 default:
4496 kfree_skb(skb);
4497 break;
4498 }
4499 }
4500 }
4501
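/* Command work item: cmd_cnt is the credit for commands the
 * controller can still accept, a clone of each sent command is kept
 * in sent_cmd for completion matching, and cmd_timer acts as a
 * watchdog for controllers that stop responding.
 */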
4502 static void hci_cmd_work(struct work_struct *work)
4503 {
4504 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4505 struct sk_buff *skb;
4506
4507 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4508 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4509
4510 /* Send queued commands */
4511 if (atomic_read(&hdev->cmd_cnt)) {
4512 skb = skb_dequeue(&hdev->cmd_q);
4513 if (!skb)
4514 return;
4515
4516 kfree_skb(hdev->sent_cmd);
4517
4518 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4519 if (hdev->sent_cmd) {
4520 atomic_dec(&hdev->cmd_cnt);
4521 hci_send_frame(hdev, skb);
4522 if (test_bit(HCI_RESET, &hdev->flags))
4523 cancel_delayed_work(&hdev->cmd_timer);
4524 else
4525 schedule_delayed_work(&hdev->cmd_timer,
4526 HCI_CMD_TIMEOUT);
4527 } else {
4528 skb_queue_head(&hdev->cmd_q, skb);
4529 queue_work(hdev->workqueue, &hdev->cmd_work);
4530 }
4531 }
4532 }