net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
64
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI debugfs entries ---- */
76
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
83 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91 {
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
123 kfree_skb(skb);
124
125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
126
127 return count;
128 }
129
130 static const struct file_operations dut_mode_fops = {
131 .open = simple_open,
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
135 };
136
137 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
138 size_t count, loff_t *ppos)
139 {
140 struct hci_dev *hdev = file->private_data;
141 char buf[3];
142
143 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
144 buf[1] = '\n';
145 buf[2] = '\0';
146 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
147 }
148
149 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
150 size_t count, loff_t *ppos)
151 {
152 struct hci_dev *hdev = file->private_data;
153 char buf[32];
154 size_t buf_size = min(count, (sizeof(buf)-1));
155 bool enable;
156 int err;
157
158 if (copy_from_user(buf, user_buf, buf_size))
159 return -EFAULT;
160
161 buf[buf_size] = '\0';
162 if (strtobool(buf, &enable))
163 return -EINVAL;
164
165 hci_req_lock(hdev);
166 err = hdev->set_diag(hdev, enable);
167 hci_req_unlock(hdev);
168
169 if (err < 0)
170 return err;
171
172 if (enable)
173 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
174 else
175 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
176
177 return count;
178 }
179
180 static const struct file_operations vendor_diag_fops = {
181 .open = simple_open,
182 .read = vendor_diag_read,
183 .write = vendor_diag_write,
184 .llseek = default_llseek,
185 };
186
187 static void hci_debugfs_create_basic(struct hci_dev *hdev)
188 {
189 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
190 &dut_mode_fops);
191
192 if (hdev->set_diag)
193 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
194 &vendor_diag_fops);
195 }
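/*
 * Example usage of the entries created above (a sketch; assumes debugfs
 * is mounted at /sys/kernel/debug). strtobool() accepts "0"/"1"/"y"/"n":
 *
 *	# echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	Y
 *
 * dut_mode_write() only toggles the HCI_DUT_MODE flag after the
 * controller has accepted HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET when
 * disabling).
 */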
196
197 /* ---- HCI requests ---- */
198
199 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
200 struct sk_buff *skb)
201 {
202 BT_DBG("%s result 0x%2.2x", hdev->name, result);
203
204 if (hdev->req_status == HCI_REQ_PEND) {
205 hdev->req_result = result;
206 hdev->req_status = HCI_REQ_DONE;
207 if (skb)
208 hdev->req_skb = skb_get(skb);
209 wake_up_interruptible(&hdev->req_wait_q);
210 }
211 }
212
213 static void hci_req_cancel(struct hci_dev *hdev, int err)
214 {
215 BT_DBG("%s err 0x%2.2x", hdev->name, err);
216
217 if (hdev->req_status == HCI_REQ_PEND) {
218 hdev->req_result = err;
219 hdev->req_status = HCI_REQ_CANCELED;
220 wake_up_interruptible(&hdev->req_wait_q);
221 }
222 }
223
224 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
225 const void *param, u8 event, u32 timeout)
226 {
227 DECLARE_WAITQUEUE(wait, current);
228 struct hci_request req;
229 struct sk_buff *skb;
230 int err = 0;
231
232 BT_DBG("%s", hdev->name);
233
234 hci_req_init(&req, hdev);
235
236 hci_req_add_ev(&req, opcode, plen, param, event);
237
238 hdev->req_status = HCI_REQ_PEND;
239
240 add_wait_queue(&hdev->req_wait_q, &wait);
241 set_current_state(TASK_INTERRUPTIBLE);
242
243 err = hci_req_run_skb(&req, hci_req_sync_complete);
244 if (err < 0) {
245 remove_wait_queue(&hdev->req_wait_q, &wait);
246 set_current_state(TASK_RUNNING);
247 return ERR_PTR(err);
248 }
249
250 schedule_timeout(timeout);
251
252 remove_wait_queue(&hdev->req_wait_q, &wait);
253
254 if (signal_pending(current))
255 return ERR_PTR(-EINTR);
256
257 switch (hdev->req_status) {
258 case HCI_REQ_DONE:
259 err = -bt_to_errno(hdev->req_result);
260 break;
261
262 case HCI_REQ_CANCELED:
263 err = -hdev->req_result;
264 break;
265
266 default:
267 err = -ETIMEDOUT;
268 break;
269 }
270
271 hdev->req_status = hdev->req_result = 0;
272 skb = hdev->req_skb;
273 hdev->req_skb = NULL;
274
275 BT_DBG("%s end: err %d", hdev->name, err);
276
277 if (err < 0) {
278 kfree_skb(skb);
279 return ERR_PTR(err);
280 }
281
282 if (!skb)
283 return ERR_PTR(-ENODATA);
284
285 return skb;
286 }
287 EXPORT_SYMBOL(__hci_cmd_sync_ev);
288
289 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param, u32 timeout)
291 {
292 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
293 }
294 EXPORT_SYMBOL(__hci_cmd_sync);
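/*
 * Typical use of the synchronous helpers above, mirroring
 * dut_mode_write(): send one command, block until the matching Command
 * Complete/Status arrives, and free the returned skb (a minimal sketch):
 *
 *	struct sk_buff *skb;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 *
 * The returned skb carries the command response parameters and must be
 * freed by the caller.
 */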
295
296 /* Execute request and wait for completion. */
297 static int __hci_req_sync(struct hci_dev *hdev,
298 void (*func)(struct hci_request *req,
299 unsigned long opt),
300 unsigned long opt, __u32 timeout)
301 {
302 struct hci_request req;
303 DECLARE_WAITQUEUE(wait, current);
304 int err = 0;
305
306 BT_DBG("%s start", hdev->name);
307
308 hci_req_init(&req, hdev);
309
310 hdev->req_status = HCI_REQ_PEND;
311
312 func(&req, opt);
313
314 add_wait_queue(&hdev->req_wait_q, &wait);
315 set_current_state(TASK_INTERRUPTIBLE);
316
317 err = hci_req_run_skb(&req, hci_req_sync_complete);
318 if (err < 0) {
319 hdev->req_status = 0;
320
321 remove_wait_queue(&hdev->req_wait_q, &wait);
322 set_current_state(TASK_RUNNING);
323
324 /* ENODATA means the HCI request command queue is empty.
325 * This can happen when a request with conditionals doesn't
326 * trigger any commands to be sent. This is normal behavior
327 * and should not trigger an error return.
328 */
329 if (err == -ENODATA)
330 return 0;
331
332 return err;
333 }
334
335 schedule_timeout(timeout);
336
337 remove_wait_queue(&hdev->req_wait_q, &wait);
338
339 if (signal_pending(current))
340 return -EINTR;
341
342 switch (hdev->req_status) {
343 case HCI_REQ_DONE:
344 err = -bt_to_errno(hdev->req_result);
345 break;
346
347 case HCI_REQ_CANCELED:
348 err = -hdev->req_result;
349 break;
350
351 default:
352 err = -ETIMEDOUT;
353 break;
354 }
355
356 hdev->req_status = hdev->req_result = 0;
357
358 BT_DBG("%s end: err %d", hdev->name, err);
359
360 return err;
361 }
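/*
 * The request/wait handshake used by __hci_cmd_sync_ev() and
 * __hci_req_sync() can be summarized as:
 *
 *	req_status = HCI_REQ_PEND  ->  commands queued and sent
 *	HCI_REQ_DONE               ->  hci_req_sync_complete() ran,
 *	                               req_result holds the HCI status
 *	HCI_REQ_CANCELED           ->  hci_req_cancel() ran,
 *	                               req_result holds a POSIX errno
 *	still HCI_REQ_PEND         ->  schedule_timeout() expired,
 *	                               mapped to -ETIMEDOUT
 *
 * The completion side wakes req_wait_q, so callers sleep for at most
 * "timeout" jiffies.
 */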
362
363 static int hci_req_sync(struct hci_dev *hdev,
364 void (*req)(struct hci_request *req,
365 unsigned long opt),
366 unsigned long opt, __u32 timeout)
367 {
368 int ret;
369
370 if (!test_bit(HCI_UP, &hdev->flags))
371 return -ENETDOWN;
372
373 /* Serialize all requests */
374 hci_req_lock(hdev);
375 ret = __hci_req_sync(hdev, req, opt, timeout);
376 hci_req_unlock(hdev);
377
378 return ret;
379 }
380
381 static void hci_reset_req(struct hci_request *req, unsigned long opt)
382 {
383 BT_DBG("%s %ld", req->hdev->name, opt);
384
385 /* Reset device */
386 set_bit(HCI_RESET, &req->hdev->flags);
387 hci_req_add(req, HCI_OP_RESET, 0, NULL);
388 }
389
390 static void bredr_init(struct hci_request *req)
391 {
392 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
393
394 /* Read Local Supported Features */
395 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
396
397 /* Read Local Version */
398 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
399
400 /* Read BD Address */
401 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
402 }
403
404 static void amp_init1(struct hci_request *req)
405 {
406 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
407
408 /* Read Local Version */
409 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
410
411 /* Read Local Supported Commands */
412 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
413
414 /* Read Local AMP Info */
415 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
416
417 /* Read Data Blk size */
418 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
419
420 /* Read Flow Control Mode */
421 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
422
423 /* Read Location Data */
424 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
425 }
426
427 static void amp_init2(struct hci_request *req)
428 {
429 /* Read Local Supported Features. Not all AMP controllers
430 * support this so it's placed conditionally in the second
431 * stage init.
432 */
433 if (req->hdev->commands[14] & 0x20)
434 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
435 }
436
437 static void hci_init1_req(struct hci_request *req, unsigned long opt)
438 {
439 struct hci_dev *hdev = req->hdev;
440
441 BT_DBG("%s %ld", hdev->name, opt);
442
443 /* Reset */
444 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
445 hci_reset_req(req, 0);
446
447 switch (hdev->dev_type) {
448 case HCI_BREDR:
449 bredr_init(req);
450 break;
451
452 case HCI_AMP:
453 amp_init1(req);
454 break;
455
456 default:
457 BT_ERR("Unknown device type %d", hdev->dev_type);
458 break;
459 }
460 }
461
462 static void bredr_setup(struct hci_request *req)
463 {
464 __le16 param;
465 __u8 flt_type;
466
467 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
468 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
469
470 /* Read Class of Device */
471 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
472
473 /* Read Local Name */
474 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
475
476 /* Read Voice Setting */
477 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
478
479 /* Read Number of Supported IAC */
480 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
481
482 /* Read Current IAC LAP */
483 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
484
485 /* Clear Event Filters */
486 flt_type = HCI_FLT_CLEAR_ALL;
487 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
488
489 /* Connection accept timeout ~20 secs */
490 param = cpu_to_le16(0x7d00);
491 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
492 }
493
494 static void le_setup(struct hci_request *req)
495 {
496 struct hci_dev *hdev = req->hdev;
497
498 /* Read LE Buffer Size */
499 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
500
501 /* Read LE Local Supported Features */
502 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
503
504 /* Read LE Supported States */
505 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
506
507 /* Read LE White List Size */
508 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
509
510 /* Clear LE White List */
511 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
512
513 /* LE-only controllers have LE implicitly enabled */
514 if (!lmp_bredr_capable(hdev))
515 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
516 }
517
518 static void hci_setup_event_mask(struct hci_request *req)
519 {
520 struct hci_dev *hdev = req->hdev;
521
522 /* The second byte is 0xff instead of 0x9f (two reserved bits
523 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
524 * command otherwise.
525 */
526 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
527
528 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
529 * any event mask for pre 1.2 devices.
530 */
531 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
532 return;
533
534 if (lmp_bredr_capable(hdev)) {
535 events[4] |= 0x01; /* Flow Specification Complete */
536 events[4] |= 0x02; /* Inquiry Result with RSSI */
537 events[4] |= 0x04; /* Read Remote Extended Features Complete */
538 events[5] |= 0x08; /* Synchronous Connection Complete */
539 events[5] |= 0x10; /* Synchronous Connection Changed */
540 } else {
541 /* Use a different default for LE-only devices */
542 memset(events, 0, sizeof(events));
543 events[0] |= 0x10; /* Disconnection Complete */
544 events[1] |= 0x08; /* Read Remote Version Information Complete */
545 events[1] |= 0x20; /* Command Complete */
546 events[1] |= 0x40; /* Command Status */
547 events[1] |= 0x80; /* Hardware Error */
548 events[2] |= 0x04; /* Number of Completed Packets */
549 events[3] |= 0x02; /* Data Buffer Overflow */
550
551 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
552 events[0] |= 0x80; /* Encryption Change */
553 events[5] |= 0x80; /* Encryption Key Refresh Complete */
554 }
555 }
556
557 if (lmp_inq_rssi_capable(hdev))
558 events[4] |= 0x02; /* Inquiry Result with RSSI */
559
560 if (lmp_sniffsubr_capable(hdev))
561 events[5] |= 0x20; /* Sniff Subrating */
562
563 if (lmp_pause_enc_capable(hdev))
564 events[5] |= 0x80; /* Encryption Key Refresh Complete */
565
566 if (lmp_ext_inq_capable(hdev))
567 events[5] |= 0x40; /* Extended Inquiry Result */
568
569 if (lmp_no_flush_capable(hdev))
570 events[7] |= 0x01; /* Enhanced Flush Complete */
571
572 if (lmp_lsto_capable(hdev))
573 events[6] |= 0x80; /* Link Supervision Timeout Changed */
574
575 if (lmp_ssp_capable(hdev)) {
576 events[6] |= 0x01; /* IO Capability Request */
577 events[6] |= 0x02; /* IO Capability Response */
578 events[6] |= 0x04; /* User Confirmation Request */
579 events[6] |= 0x08; /* User Passkey Request */
580 events[6] |= 0x10; /* Remote OOB Data Request */
581 events[6] |= 0x20; /* Simple Pairing Complete */
582 events[7] |= 0x04; /* User Passkey Notification */
583 events[7] |= 0x08; /* Keypress Notification */
584 events[7] |= 0x10; /* Remote Host Supported
585 * Features Notification
586 */
587 }
588
589 if (lmp_le_capable(hdev))
590 events[7] |= 0x20; /* LE Meta-Event */
591
592 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
593 }
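/*
 * Mapping used when building the mask above (a reading of the Set Event
 * Mask definition in the Core Specification): for the events listed
 * here, event code N occupies bit (N - 1) of the 64-bit mask, i.e.
 * byte (N - 1) / 8, bit (N - 1) % 8. For example the Disconnection
 * Complete event (code 0x05) is bit 4, which is why the LE-only branch
 * sets events[0] |= 0x10.
 */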
594
595 static void hci_init2_req(struct hci_request *req, unsigned long opt)
596 {
597 struct hci_dev *hdev = req->hdev;
598
599 if (hdev->dev_type == HCI_AMP)
600 return amp_init2(req);
601
602 if (lmp_bredr_capable(hdev))
603 bredr_setup(req);
604 else
605 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
606
607 if (lmp_le_capable(hdev))
608 le_setup(req);
609
610 /* All Bluetooth 1.2 and later controllers should support the
611 * HCI command for reading the local supported commands.
612 *
613 * Unfortunately some controllers indicate Bluetooth 1.2 support,
614 * but do not have support for this command. If that is the case,
615 * the driver can quirk the behavior and skip reading the local
616 * supported commands.
617 */
618 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
619 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
620 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
621
622 if (lmp_ssp_capable(hdev)) {
623 /* When SSP is available, the host features page
624 * should also be available. However some
625 * controllers list the max_page as 0 as long as SSP
626 * has not been enabled. To achieve proper debugging
627 * output, force max_page to at least 1.
628 */
629 hdev->max_page = 0x01;
630
631 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
632 u8 mode = 0x01;
633
634 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
635 sizeof(mode), &mode);
636 } else {
637 struct hci_cp_write_eir cp;
638
639 memset(hdev->eir, 0, sizeof(hdev->eir));
640 memset(&cp, 0, sizeof(cp));
641
642 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
643 }
644 }
645
646 if (lmp_inq_rssi_capable(hdev) ||
647 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
648 u8 mode;
649
650 /* If Extended Inquiry Result events are supported, then
651 * they are clearly preferred over Inquiry Result with RSSI
652 * events.
653 */
654 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
655
656 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
657 }
658
659 if (lmp_inq_tx_pwr_capable(hdev))
660 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
661
662 if (lmp_ext_feat_capable(hdev)) {
663 struct hci_cp_read_local_ext_features cp;
664
665 cp.page = 0x01;
666 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
667 sizeof(cp), &cp);
668 }
669
670 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
671 u8 enable = 1;
672 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
673 &enable);
674 }
675 }
676
677 static void hci_setup_link_policy(struct hci_request *req)
678 {
679 struct hci_dev *hdev = req->hdev;
680 struct hci_cp_write_def_link_policy cp;
681 u16 link_policy = 0;
682
683 if (lmp_rswitch_capable(hdev))
684 link_policy |= HCI_LP_RSWITCH;
685 if (lmp_hold_capable(hdev))
686 link_policy |= HCI_LP_HOLD;
687 if (lmp_sniff_capable(hdev))
688 link_policy |= HCI_LP_SNIFF;
689 if (lmp_park_capable(hdev))
690 link_policy |= HCI_LP_PARK;
691
692 cp.policy = cpu_to_le16(link_policy);
693 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
694 }
695
696 static void hci_set_le_support(struct hci_request *req)
697 {
698 struct hci_dev *hdev = req->hdev;
699 struct hci_cp_write_le_host_supported cp;
700
701 /* LE-only devices do not support explicit enablement */
702 if (!lmp_bredr_capable(hdev))
703 return;
704
705 memset(&cp, 0, sizeof(cp));
706
707 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
708 cp.le = 0x01;
709 cp.simul = 0x00;
710 }
711
712 if (cp.le != lmp_host_le_capable(hdev))
713 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
714 &cp);
715 }
716
717 static void hci_set_event_mask_page_2(struct hci_request *req)
718 {
719 struct hci_dev *hdev = req->hdev;
720 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
721
722 /* If Connectionless Slave Broadcast master role is supported
723 * enable all necessary events for it.
724 */
725 if (lmp_csb_master_capable(hdev)) {
726 events[1] |= 0x40; /* Triggered Clock Capture */
727 events[1] |= 0x80; /* Synchronization Train Complete */
728 events[2] |= 0x10; /* Slave Page Response Timeout */
729 events[2] |= 0x20; /* CSB Channel Map Change */
730 }
731
732 /* If Connectionless Slave Broadcast slave role is supported
733 * enable all necessary events for it.
734 */
735 if (lmp_csb_slave_capable(hdev)) {
736 events[2] |= 0x01; /* Synchronization Train Received */
737 events[2] |= 0x02; /* CSB Receive */
738 events[2] |= 0x04; /* CSB Timeout */
739 events[2] |= 0x08; /* Truncated Page Complete */
740 }
741
742 /* Enable Authenticated Payload Timeout Expired event if supported */
743 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
744 events[2] |= 0x80;
745
746 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
747 }
748
749 static void hci_init3_req(struct hci_request *req, unsigned long opt)
750 {
751 struct hci_dev *hdev = req->hdev;
752 u8 p;
753
754 hci_setup_event_mask(req);
755
756 if (hdev->commands[6] & 0x20 &&
757 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
758 struct hci_cp_read_stored_link_key cp;
759
760 bacpy(&cp.bdaddr, BDADDR_ANY);
761 cp.read_all = 0x01;
762 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
763 }
764
765 if (hdev->commands[5] & 0x10)
766 hci_setup_link_policy(req);
767
768 if (hdev->commands[8] & 0x01)
769 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
770
771 /* Some older Broadcom based Bluetooth 1.2 controllers do not
772 * support the Read Page Scan Type command. Check support for
773 * this command in the bit mask of supported commands.
774 */
775 if (hdev->commands[13] & 0x01)
776 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
777
778 if (lmp_le_capable(hdev)) {
779 u8 events[8];
780
781 memset(events, 0, sizeof(events));
782 events[0] = 0x0f;
783
784 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
785 events[0] |= 0x10; /* LE Long Term Key Request */
786
787 /* If controller supports the Connection Parameters Request
788 * Link Layer Procedure, enable the corresponding event.
789 */
790 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
791 events[0] |= 0x20; /* LE Remote Connection
792 * Parameter Request
793 */
794
795 /* If the controller supports the Data Length Extension
796 * feature, enable the corresponding event.
797 */
798 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
799 events[0] |= 0x40; /* LE Data Length Change */
800
801 /* If the controller supports Extended Scanner Filter
802 * Policies, enable the corresponding event.
803 */
804 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
805 events[1] |= 0x04; /* LE Direct Advertising
806 * Report
807 */
808
809 /* If the controller supports the LE Read Local P-256
810 * Public Key command, enable the corresponding event.
811 */
812 if (hdev->commands[34] & 0x02)
813 events[0] |= 0x80; /* LE Read Local P-256
814 * Public Key Complete
815 */
816
817 /* If the controller supports the LE Generate DHKey
818 * command, enable the corresponding event.
819 */
820 if (hdev->commands[34] & 0x04)
821 events[1] |= 0x01; /* LE Generate DHKey Complete */
822
823 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
824 events);
825
826 if (hdev->commands[25] & 0x40) {
827 /* Read LE Advertising Channel TX Power */
828 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
829 }
830
831 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
832 /* Read LE Maximum Data Length */
833 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
834
835 /* Read LE Suggested Default Data Length */
836 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
837 }
838
839 hci_set_le_support(req);
840 }
841
842 /* Read features beyond page 1 if available */
843 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
844 struct hci_cp_read_local_ext_features cp;
845
846 cp.page = p;
847 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
848 sizeof(cp), &cp);
849 }
850 }
851
852 static void hci_init4_req(struct hci_request *req, unsigned long opt)
853 {
854 struct hci_dev *hdev = req->hdev;
855
856 /* Some Broadcom based Bluetooth controllers do not support the
857 * Delete Stored Link Key command. They are clearly indicating its
858 * absence in the bit mask of supported commands.
859 *
860 * Check the supported commands and only if the command is marked
861 * as supported send it. If not supported, assume that the controller
862 * does not have actual support for stored link keys which makes this
863 * command redundant anyway.
864 *
865 * Some controllers indicate that they support handling deleting
866 * stored link keys, but they don't. The quirk lets a driver
867 * just disable this command.
868 */
869 if (hdev->commands[6] & 0x80 &&
870 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
871 struct hci_cp_delete_stored_link_key cp;
872
873 bacpy(&cp.bdaddr, BDADDR_ANY);
874 cp.delete_all = 0x01;
875 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
876 sizeof(cp), &cp);
877 }
878
879 /* Set event mask page 2 if the HCI command for it is supported */
880 if (hdev->commands[22] & 0x04)
881 hci_set_event_mask_page_2(req);
882
883 /* Read local codec list if the HCI command is supported */
884 if (hdev->commands[29] & 0x20)
885 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
886
887 /* Get MWS transport configuration if the HCI command is supported */
888 if (hdev->commands[30] & 0x08)
889 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
890
891 /* Check for Synchronization Train support */
892 if (lmp_sync_train_capable(hdev))
893 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
894
895 /* Enable Secure Connections if supported and configured */
896 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
897 bredr_sc_enabled(hdev)) {
898 u8 support = 0x01;
899
900 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
901 sizeof(support), &support);
902 }
903 }
904
905 static int __hci_init(struct hci_dev *hdev)
906 {
907 int err;
908
909 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
910 if (err < 0)
911 return err;
912
913 if (hci_dev_test_flag(hdev, HCI_SETUP))
914 hci_debugfs_create_basic(hdev);
915
916 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
917 if (err < 0)
918 return err;
919
920 /* The HCI_BREDR device type covers single-mode LE, single-mode BR/EDR
921 * and dual-mode BR/EDR/LE controllers. AMP controllers only need the
922 * first two stages of init.
923 */
924 if (hdev->dev_type != HCI_BREDR)
925 return 0;
926
927 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
928 if (err < 0)
929 return err;
930
931 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
932 if (err < 0)
933 return err;
934
935 /* This function is only called when the controller is actually in
936 * configured state. When the controller is marked as unconfigured,
937 * this initialization procedure is not run.
938 *
939 * It means that it is possible that a controller runs through its
940 * setup phase and then discovers missing settings. If that is the
941 * case, then this function will not be called. It then will only
942 * be called during the config phase.
943 *
944 * So only when in setup phase or config phase, create the debugfs
945 * entries and register the SMP channels.
946 */
947 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
948 !hci_dev_test_flag(hdev, HCI_CONFIG))
949 return 0;
950
951 hci_debugfs_create_common(hdev);
952
953 if (lmp_bredr_capable(hdev))
954 hci_debugfs_create_bredr(hdev);
955
956 if (lmp_le_capable(hdev))
957 hci_debugfs_create_le(hdev);
958
959 return 0;
960 }
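/*
 * Controller bring-up is thus staged: hci_init1_req() resets the
 * controller and reads the basics (features, version, address),
 * hci_init2_req() does the transport specific setup, and only primary
 * (HCI_BREDR) controllers continue with hci_init3_req() and
 * hci_init4_req(), where every optional command is gated on the
 * supported-commands bitmask or an LMP feature bit. The extra debugfs
 * entries are only created while HCI_SETUP or HCI_CONFIG is set.
 */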
961
962 static void hci_init0_req(struct hci_request *req, unsigned long opt)
963 {
964 struct hci_dev *hdev = req->hdev;
965
966 BT_DBG("%s %ld", hdev->name, opt);
967
968 /* Reset */
969 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
970 hci_reset_req(req, 0);
971
972 /* Read Local Version */
973 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
974
975 /* Read BD Address */
976 if (hdev->set_bdaddr)
977 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
978 }
979
980 static int __hci_unconf_init(struct hci_dev *hdev)
981 {
982 int err;
983
984 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
985 return 0;
986
987 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
988 if (err < 0)
989 return err;
990
991 if (hci_dev_test_flag(hdev, HCI_SETUP))
992 hci_debugfs_create_basic(hdev);
993
994 return 0;
995 }
996
997 static void hci_scan_req(struct hci_request *req, unsigned long opt)
998 {
999 __u8 scan = opt;
1000
1001 BT_DBG("%s %x", req->hdev->name, scan);
1002
1003 /* Inquiry and Page scans */
1004 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1005 }
1006
1007 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1008 {
1009 __u8 auth = opt;
1010
1011 BT_DBG("%s %x", req->hdev->name, auth);
1012
1013 /* Authentication */
1014 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1015 }
1016
1017 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1018 {
1019 __u8 encrypt = opt;
1020
1021 BT_DBG("%s %x", req->hdev->name, encrypt);
1022
1023 /* Encryption */
1024 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1025 }
1026
1027 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1028 {
1029 __le16 policy = cpu_to_le16(opt);
1030
1031 BT_DBG("%s %x", req->hdev->name, policy);
1032
1033 /* Default link policy */
1034 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1035 }
1036
1037 /* Get HCI device by index.
1038 * Device is held on return. */
1039 struct hci_dev *hci_dev_get(int index)
1040 {
1041 struct hci_dev *hdev = NULL, *d;
1042
1043 BT_DBG("%d", index);
1044
1045 if (index < 0)
1046 return NULL;
1047
1048 read_lock(&hci_dev_list_lock);
1049 list_for_each_entry(d, &hci_dev_list, list) {
1050 if (d->id == index) {
1051 hdev = hci_dev_hold(d);
1052 break;
1053 }
1054 }
1055 read_unlock(&hci_dev_list_lock);
1056 return hdev;
1057 }
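/*
 * hci_dev_get() takes a reference that the caller must drop again, so
 * the usual pattern (a sketch of how the ioctl helpers below use it) is:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */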
1058
1059 /* ---- Inquiry support ---- */
1060
1061 bool hci_discovery_active(struct hci_dev *hdev)
1062 {
1063 struct discovery_state *discov = &hdev->discovery;
1064
1065 switch (discov->state) {
1066 case DISCOVERY_FINDING:
1067 case DISCOVERY_RESOLVING:
1068 return true;
1069
1070 default:
1071 return false;
1072 }
1073 }
1074
1075 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1076 {
1077 int old_state = hdev->discovery.state;
1078
1079 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1080
1081 if (old_state == state)
1082 return;
1083
1084 hdev->discovery.state = state;
1085
1086 switch (state) {
1087 case DISCOVERY_STOPPED:
1088 hci_update_background_scan(hdev);
1089
1090 if (old_state != DISCOVERY_STARTING)
1091 mgmt_discovering(hdev, 0);
1092 break;
1093 case DISCOVERY_STARTING:
1094 break;
1095 case DISCOVERY_FINDING:
1096 mgmt_discovering(hdev, 1);
1097 break;
1098 case DISCOVERY_RESOLVING:
1099 break;
1100 case DISCOVERY_STOPPING:
1101 break;
1102 }
1103 }
1104
1105 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1106 {
1107 struct discovery_state *cache = &hdev->discovery;
1108 struct inquiry_entry *p, *n;
1109
1110 list_for_each_entry_safe(p, n, &cache->all, all) {
1111 list_del(&p->all);
1112 kfree(p);
1113 }
1114
1115 INIT_LIST_HEAD(&cache->unknown);
1116 INIT_LIST_HEAD(&cache->resolve);
1117 }
1118
1119 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1120 bdaddr_t *bdaddr)
1121 {
1122 struct discovery_state *cache = &hdev->discovery;
1123 struct inquiry_entry *e;
1124
1125 BT_DBG("cache %p, %pMR", cache, bdaddr);
1126
1127 list_for_each_entry(e, &cache->all, all) {
1128 if (!bacmp(&e->data.bdaddr, bdaddr))
1129 return e;
1130 }
1131
1132 return NULL;
1133 }
1134
1135 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1136 bdaddr_t *bdaddr)
1137 {
1138 struct discovery_state *cache = &hdev->discovery;
1139 struct inquiry_entry *e;
1140
1141 BT_DBG("cache %p, %pMR", cache, bdaddr);
1142
1143 list_for_each_entry(e, &cache->unknown, list) {
1144 if (!bacmp(&e->data.bdaddr, bdaddr))
1145 return e;
1146 }
1147
1148 return NULL;
1149 }
1150
1151 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1152 bdaddr_t *bdaddr,
1153 int state)
1154 {
1155 struct discovery_state *cache = &hdev->discovery;
1156 struct inquiry_entry *e;
1157
1158 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1159
1160 list_for_each_entry(e, &cache->resolve, list) {
1161 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1162 return e;
1163 if (!bacmp(&e->data.bdaddr, bdaddr))
1164 return e;
1165 }
1166
1167 return NULL;
1168 }
1169
1170 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1171 struct inquiry_entry *ie)
1172 {
1173 struct discovery_state *cache = &hdev->discovery;
1174 struct list_head *pos = &cache->resolve;
1175 struct inquiry_entry *p;
1176
1177 list_del(&ie->list);
1178
1179 list_for_each_entry(p, &cache->resolve, list) {
1180 if (p->name_state != NAME_PENDING &&
1181 abs(p->data.rssi) >= abs(ie->data.rssi))
1182 break;
1183 pos = &p->list;
1184 }
1185
1186 list_add(&ie->list, pos);
1187 }
1188
1189 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1190 bool name_known)
1191 {
1192 struct discovery_state *cache = &hdev->discovery;
1193 struct inquiry_entry *ie;
1194 u32 flags = 0;
1195
1196 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1197
1198 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1199
1200 if (!data->ssp_mode)
1201 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1202
1203 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1204 if (ie) {
1205 if (!ie->data.ssp_mode)
1206 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1207
1208 if (ie->name_state == NAME_NEEDED &&
1209 data->rssi != ie->data.rssi) {
1210 ie->data.rssi = data->rssi;
1211 hci_inquiry_cache_update_resolve(hdev, ie);
1212 }
1213
1214 goto update;
1215 }
1216
1217 /* Entry not in the cache. Add new one. */
1218 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1219 if (!ie) {
1220 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1221 goto done;
1222 }
1223
1224 list_add(&ie->all, &cache->all);
1225
1226 if (name_known) {
1227 ie->name_state = NAME_KNOWN;
1228 } else {
1229 ie->name_state = NAME_NOT_KNOWN;
1230 list_add(&ie->list, &cache->unknown);
1231 }
1232
1233 update:
1234 if (name_known && ie->name_state != NAME_KNOWN &&
1235 ie->name_state != NAME_PENDING) {
1236 ie->name_state = NAME_KNOWN;
1237 list_del(&ie->list);
1238 }
1239
1240 memcpy(&ie->data, data, sizeof(*data));
1241 ie->timestamp = jiffies;
1242 cache->timestamp = jiffies;
1243
1244 if (ie->name_state == NAME_NOT_KNOWN)
1245 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1246
1247 done:
1248 return flags;
1249 }
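/*
 * The flags returned above (MGMT_DEV_FOUND_CONFIRM_NAME and
 * MGMT_DEV_FOUND_LEGACY_PAIRING) are not consumed here; the inquiry
 * result handlers in the event path are expected to forward them to the
 * management interface via mgmt_device_found() together with the
 * device-found report.
 */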
1250
1251 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1252 {
1253 struct discovery_state *cache = &hdev->discovery;
1254 struct inquiry_info *info = (struct inquiry_info *) buf;
1255 struct inquiry_entry *e;
1256 int copied = 0;
1257
1258 list_for_each_entry(e, &cache->all, all) {
1259 struct inquiry_data *data = &e->data;
1260
1261 if (copied >= num)
1262 break;
1263
1264 bacpy(&info->bdaddr, &data->bdaddr);
1265 info->pscan_rep_mode = data->pscan_rep_mode;
1266 info->pscan_period_mode = data->pscan_period_mode;
1267 info->pscan_mode = data->pscan_mode;
1268 memcpy(info->dev_class, data->dev_class, 3);
1269 info->clock_offset = data->clock_offset;
1270
1271 info++;
1272 copied++;
1273 }
1274
1275 BT_DBG("cache %p, copied %d", cache, copied);
1276 return copied;
1277 }
1278
1279 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1280 {
1281 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1282 struct hci_dev *hdev = req->hdev;
1283 struct hci_cp_inquiry cp;
1284
1285 BT_DBG("%s", hdev->name);
1286
1287 if (test_bit(HCI_INQUIRY, &hdev->flags))
1288 return;
1289
1290 /* Start Inquiry */
1291 memcpy(&cp.lap, &ir->lap, 3);
1292 cp.length = ir->length;
1293 cp.num_rsp = ir->num_rsp;
1294 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1295 }
1296
1297 int hci_inquiry(void __user *arg)
1298 {
1299 __u8 __user *ptr = arg;
1300 struct hci_inquiry_req ir;
1301 struct hci_dev *hdev;
1302 int err = 0, do_inquiry = 0, max_rsp;
1303 long timeo;
1304 __u8 *buf;
1305
1306 if (copy_from_user(&ir, ptr, sizeof(ir)))
1307 return -EFAULT;
1308
1309 hdev = hci_dev_get(ir.dev_id);
1310 if (!hdev)
1311 return -ENODEV;
1312
1313 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1314 err = -EBUSY;
1315 goto done;
1316 }
1317
1318 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1319 err = -EOPNOTSUPP;
1320 goto done;
1321 }
1322
1323 if (hdev->dev_type != HCI_BREDR) {
1324 err = -EOPNOTSUPP;
1325 goto done;
1326 }
1327
1328 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1329 err = -EOPNOTSUPP;
1330 goto done;
1331 }
1332
1333 hci_dev_lock(hdev);
1334 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1335 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1336 hci_inquiry_cache_flush(hdev);
1337 do_inquiry = 1;
1338 }
1339 hci_dev_unlock(hdev);
1340
1341 timeo = ir.length * msecs_to_jiffies(2000);
1342
1343 if (do_inquiry) {
1344 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1345 timeo);
1346 if (err < 0)
1347 goto done;
1348
1349 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1350 * cleared). If it is interrupted by a signal, return -EINTR.
1351 */
1352 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1353 TASK_INTERRUPTIBLE))
1354 return -EINTR;
1355 }
1356
1357 /* For an unlimited number of responses we use a buffer with
1358 * 255 entries
1359 */
1360 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1361
1362 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1363 * copy it to user space.
1364 */
1365 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1366 if (!buf) {
1367 err = -ENOMEM;
1368 goto done;
1369 }
1370
1371 hci_dev_lock(hdev);
1372 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1373 hci_dev_unlock(hdev);
1374
1375 BT_DBG("num_rsp %d", ir.num_rsp);
1376
1377 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1378 ptr += sizeof(ir);
1379 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1380 ir.num_rsp))
1381 err = -EFAULT;
1382 } else
1383 err = -EFAULT;
1384
1385 kfree(buf);
1386
1387 done:
1388 hci_dev_put(hdev);
1389 return err;
1390 }
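/*
 * hci_inquiry() backs the HCIINQUIRY ioctl issued on an HCI socket.
 * A rough userspace sketch (buffer sizes and values are illustrative):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },  /+ GIAC +/
 *			.length  = 8,                     /+ ~10.24 s +/
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * (/+ +/ stands in for nested comments.) On return ir.num_rsp holds the
 * number of inquiry_info entries copied back after the request header.
 */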
1391
1392 static int hci_dev_do_open(struct hci_dev *hdev)
1393 {
1394 int ret = 0;
1395
1396 BT_DBG("%s %p", hdev->name, hdev);
1397
1398 hci_req_lock(hdev);
1399
1400 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1401 ret = -ENODEV;
1402 goto done;
1403 }
1404
1405 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1406 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1407 /* Check for rfkill but allow the HCI setup stage to
1408 * proceed (which in itself doesn't cause any RF activity).
1409 */
1410 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1411 ret = -ERFKILL;
1412 goto done;
1413 }
1414
1415 /* Check for valid public address or a configured static
1416 * random address, but let the HCI setup proceed to
1417 * be able to determine if there is a public address
1418 * or not.
1419 *
1420 * In case of user channel usage, it is not important
1421 * if a public address or static random address is
1422 * available.
1423 *
1424 * This check is only valid for BR/EDR controllers
1425 * since AMP controllers do not have an address.
1426 */
1427 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1428 hdev->dev_type == HCI_BREDR &&
1429 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1430 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1431 ret = -EADDRNOTAVAIL;
1432 goto done;
1433 }
1434 }
1435
1436 if (test_bit(HCI_UP, &hdev->flags)) {
1437 ret = -EALREADY;
1438 goto done;
1439 }
1440
1441 if (hdev->open(hdev)) {
1442 ret = -EIO;
1443 goto done;
1444 }
1445
1446 set_bit(HCI_RUNNING, &hdev->flags);
1447 hci_notify(hdev, HCI_DEV_OPEN);
1448
1449 atomic_set(&hdev->cmd_cnt, 1);
1450 set_bit(HCI_INIT, &hdev->flags);
1451
1452 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1453 if (hdev->setup)
1454 ret = hdev->setup(hdev);
1455
1456 /* The transport driver can set these quirks before
1457 * creating the HCI device or in its setup callback.
1458 *
1459 * In case any of them is set, the controller has to
1460 * start up as unconfigured.
1461 */
1462 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1463 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1464 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1465
1466 /* For an unconfigured controller it is required to
1467 * read at least the version information provided by
1468 * the Read Local Version Information command.
1469 *
1470 * If the set_bdaddr driver callback is provided, then
1471 * also the original Bluetooth public device address
1472 * will be read using the Read BD Address command.
1473 */
1474 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1475 ret = __hci_unconf_init(hdev);
1476 }
1477
1478 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1479 /* If public address change is configured, ensure that
1480 * the address gets programmed. If the driver does not
1481 * support changing the public address, fail the power
1482 * on procedure.
1483 */
1484 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1485 hdev->set_bdaddr)
1486 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1487 else
1488 ret = -EADDRNOTAVAIL;
1489 }
1490
1491 if (!ret) {
1492 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1493 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1494 ret = __hci_init(hdev);
1495 }
1496
1497 clear_bit(HCI_INIT, &hdev->flags);
1498
1499 if (!ret) {
1500 hci_dev_hold(hdev);
1501 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1502 set_bit(HCI_UP, &hdev->flags);
1503 hci_notify(hdev, HCI_DEV_UP);
1504 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1505 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1506 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1507 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1508 hdev->dev_type == HCI_BREDR) {
1509 hci_dev_lock(hdev);
1510 mgmt_powered(hdev, 1);
1511 hci_dev_unlock(hdev);
1512 }
1513 } else {
1514 /* Init failed, cleanup */
1515 flush_work(&hdev->tx_work);
1516 flush_work(&hdev->cmd_work);
1517 flush_work(&hdev->rx_work);
1518
1519 skb_queue_purge(&hdev->cmd_q);
1520 skb_queue_purge(&hdev->rx_q);
1521
1522 if (hdev->flush)
1523 hdev->flush(hdev);
1524
1525 if (hdev->sent_cmd) {
1526 kfree_skb(hdev->sent_cmd);
1527 hdev->sent_cmd = NULL;
1528 }
1529
1530 clear_bit(HCI_RUNNING, &hdev->flags);
1531 hci_notify(hdev, HCI_DEV_CLOSE);
1532
1533 hdev->close(hdev);
1534 hdev->flags &= BIT(HCI_RAW);
1535 }
1536
1537 done:
1538 hci_req_unlock(hdev);
1539 return ret;
1540 }
1541
1542 /* ---- HCI ioctl helpers ---- */
1543
1544 int hci_dev_open(__u16 dev)
1545 {
1546 struct hci_dev *hdev;
1547 int err;
1548
1549 hdev = hci_dev_get(dev);
1550 if (!hdev)
1551 return -ENODEV;
1552
1553 /* Devices that are marked as unconfigured can only be powered
1554 * up as user channel. Trying to bring them up as normal devices
1555 * will result in a failure. Only user channel operation is
1556 * possible.
1557 *
1558 * When this function is called for a user channel, the flag
1559 * HCI_USER_CHANNEL will be set first before attempting to
1560 * open the device.
1561 */
1562 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1563 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1564 err = -EOPNOTSUPP;
1565 goto done;
1566 }
1567
1568 /* We need to ensure that no other power on/off work is pending
1569 * before proceeding to call hci_dev_do_open. This is
1570 * particularly important if the setup procedure has not yet
1571 * completed.
1572 */
1573 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1574 cancel_delayed_work(&hdev->power_off);
1575
1576 /* After this call it is guaranteed that the setup procedure
1577 * has finished. This means that error conditions like RFKILL
1578 * or no valid public or static random address apply.
1579 */
1580 flush_workqueue(hdev->req_workqueue);
1581
1582 /* For controllers not using the management interface and that
1583 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1584 * so that pairing works for them. Once the management interface
1585 * is in use this bit will be cleared again and userspace has
1586 * to explicitly enable it.
1587 */
1588 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1589 !hci_dev_test_flag(hdev, HCI_MGMT))
1590 hci_dev_set_flag(hdev, HCI_BONDABLE);
1591
1592 err = hci_dev_do_open(hdev);
1593
1594 done:
1595 hci_dev_put(hdev);
1596 return err;
1597 }
1598
1599 /* This function requires the caller holds hdev->lock */
1600 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1601 {
1602 struct hci_conn_params *p;
1603
1604 list_for_each_entry(p, &hdev->le_conn_params, list) {
1605 if (p->conn) {
1606 hci_conn_drop(p->conn);
1607 hci_conn_put(p->conn);
1608 p->conn = NULL;
1609 }
1610 list_del_init(&p->action);
1611 }
1612
1613 BT_DBG("All LE pending actions cleared");
1614 }
1615
1616 int hci_dev_do_close(struct hci_dev *hdev)
1617 {
1618 bool auto_off;
1619
1620 BT_DBG("%s %p", hdev->name, hdev);
1621
1622 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1623 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1624 test_bit(HCI_UP, &hdev->flags)) {
1625 /* Execute vendor specific shutdown routine */
1626 if (hdev->shutdown)
1627 hdev->shutdown(hdev);
1628 }
1629
1630 cancel_delayed_work(&hdev->power_off);
1631
1632 hci_req_cancel(hdev, ENODEV);
1633 hci_req_lock(hdev);
1634
1635 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1636 cancel_delayed_work_sync(&hdev->cmd_timer);
1637 hci_req_unlock(hdev);
1638 return 0;
1639 }
1640
1641 /* Flush RX and TX works */
1642 flush_work(&hdev->tx_work);
1643 flush_work(&hdev->rx_work);
1644
1645 if (hdev->discov_timeout > 0) {
1646 cancel_delayed_work(&hdev->discov_off);
1647 hdev->discov_timeout = 0;
1648 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1649 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1650 }
1651
1652 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1653 cancel_delayed_work(&hdev->service_cache);
1654
1655 cancel_delayed_work_sync(&hdev->le_scan_disable);
1656 cancel_delayed_work_sync(&hdev->le_scan_restart);
1657
1658 if (hci_dev_test_flag(hdev, HCI_MGMT))
1659 cancel_delayed_work_sync(&hdev->rpa_expired);
1660
1661 if (hdev->adv_instance_timeout) {
1662 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1663 hdev->adv_instance_timeout = 0;
1664 }
1665
1666 /* Avoid potential lockdep warnings from the *_flush() calls by
1667 * ensuring the workqueue is empty up front.
1668 */
1669 drain_workqueue(hdev->workqueue);
1670
1671 hci_dev_lock(hdev);
1672
1673 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1674
1675 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1676
1677 if (!auto_off && hdev->dev_type == HCI_BREDR)
1678 mgmt_powered(hdev, 0);
1679
1680 hci_inquiry_cache_flush(hdev);
1681 hci_pend_le_actions_clear(hdev);
1682 hci_conn_hash_flush(hdev);
1683 hci_dev_unlock(hdev);
1684
1685 smp_unregister(hdev);
1686
1687 hci_notify(hdev, HCI_DEV_DOWN);
1688
1689 if (hdev->flush)
1690 hdev->flush(hdev);
1691
1692 /* Reset device */
1693 skb_queue_purge(&hdev->cmd_q);
1694 atomic_set(&hdev->cmd_cnt, 1);
1695 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1696 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1697 set_bit(HCI_INIT, &hdev->flags);
1698 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1699 clear_bit(HCI_INIT, &hdev->flags);
1700 }
1701
1702 /* flush cmd work */
1703 flush_work(&hdev->cmd_work);
1704
1705 /* Drop queues */
1706 skb_queue_purge(&hdev->rx_q);
1707 skb_queue_purge(&hdev->cmd_q);
1708 skb_queue_purge(&hdev->raw_q);
1709
1710 /* Drop last sent command */
1711 if (hdev->sent_cmd) {
1712 cancel_delayed_work_sync(&hdev->cmd_timer);
1713 kfree_skb(hdev->sent_cmd);
1714 hdev->sent_cmd = NULL;
1715 }
1716
1717 clear_bit(HCI_RUNNING, &hdev->flags);
1718 hci_notify(hdev, HCI_DEV_CLOSE);
1719
1720 /* After this point our queues are empty
1721 * and no tasks are scheduled. */
1722 hdev->close(hdev);
1723
1724 /* Clear flags */
1725 hdev->flags &= BIT(HCI_RAW);
1726 hci_dev_clear_volatile_flags(hdev);
1727
1728 /* Controller radio is available but is currently powered down */
1729 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1730
1731 memset(hdev->eir, 0, sizeof(hdev->eir));
1732 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1733 bacpy(&hdev->random_addr, BDADDR_ANY);
1734
1735 hci_req_unlock(hdev);
1736
1737 hci_dev_put(hdev);
1738 return 0;
1739 }
1740
1741 int hci_dev_close(__u16 dev)
1742 {
1743 struct hci_dev *hdev;
1744 int err;
1745
1746 hdev = hci_dev_get(dev);
1747 if (!hdev)
1748 return -ENODEV;
1749
1750 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1751 err = -EBUSY;
1752 goto done;
1753 }
1754
1755 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1756 cancel_delayed_work(&hdev->power_off);
1757
1758 err = hci_dev_do_close(hdev);
1759
1760 done:
1761 hci_dev_put(hdev);
1762 return err;
1763 }
1764
1765 static int hci_dev_do_reset(struct hci_dev *hdev)
1766 {
1767 int ret;
1768
1769 BT_DBG("%s %p", hdev->name, hdev);
1770
1771 hci_req_lock(hdev);
1772
1773 /* Drop queues */
1774 skb_queue_purge(&hdev->rx_q);
1775 skb_queue_purge(&hdev->cmd_q);
1776
1777 /* Avoid potential lockdep warnings from the *_flush() calls by
1778 * ensuring the workqueue is empty up front.
1779 */
1780 drain_workqueue(hdev->workqueue);
1781
1782 hci_dev_lock(hdev);
1783 hci_inquiry_cache_flush(hdev);
1784 hci_conn_hash_flush(hdev);
1785 hci_dev_unlock(hdev);
1786
1787 if (hdev->flush)
1788 hdev->flush(hdev);
1789
1790 atomic_set(&hdev->cmd_cnt, 1);
1791 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1792
1793 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1794
1795 hci_req_unlock(hdev);
1796 return ret;
1797 }
1798
1799 int hci_dev_reset(__u16 dev)
1800 {
1801 struct hci_dev *hdev;
1802 int err;
1803
1804 hdev = hci_dev_get(dev);
1805 if (!hdev)
1806 return -ENODEV;
1807
1808 if (!test_bit(HCI_UP, &hdev->flags)) {
1809 err = -ENETDOWN;
1810 goto done;
1811 }
1812
1813 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1814 err = -EBUSY;
1815 goto done;
1816 }
1817
1818 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1819 err = -EOPNOTSUPP;
1820 goto done;
1821 }
1822
1823 err = hci_dev_do_reset(hdev);
1824
1825 done:
1826 hci_dev_put(hdev);
1827 return err;
1828 }
1829
1830 int hci_dev_reset_stat(__u16 dev)
1831 {
1832 struct hci_dev *hdev;
1833 int ret = 0;
1834
1835 hdev = hci_dev_get(dev);
1836 if (!hdev)
1837 return -ENODEV;
1838
1839 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1840 ret = -EBUSY;
1841 goto done;
1842 }
1843
1844 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1845 ret = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
1849 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1850
1851 done:
1852 hci_dev_put(hdev);
1853 return ret;
1854 }
1855
1856 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1857 {
1858 bool conn_changed, discov_changed;
1859
1860 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1861
1862 if ((scan & SCAN_PAGE))
1863 conn_changed = !hci_dev_test_and_set_flag(hdev,
1864 HCI_CONNECTABLE);
1865 else
1866 conn_changed = hci_dev_test_and_clear_flag(hdev,
1867 HCI_CONNECTABLE);
1868
1869 if ((scan & SCAN_INQUIRY)) {
1870 discov_changed = !hci_dev_test_and_set_flag(hdev,
1871 HCI_DISCOVERABLE);
1872 } else {
1873 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1874 discov_changed = hci_dev_test_and_clear_flag(hdev,
1875 HCI_DISCOVERABLE);
1876 }
1877
1878 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1879 return;
1880
1881 if (conn_changed || discov_changed) {
1882 /* In case this was disabled through mgmt */
1883 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1884
1885 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1886 mgmt_update_adv_data(hdev);
1887
1888 mgmt_new_settings(hdev);
1889 }
1890 }
1891
1892 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1893 {
1894 struct hci_dev *hdev;
1895 struct hci_dev_req dr;
1896 int err = 0;
1897
1898 if (copy_from_user(&dr, arg, sizeof(dr)))
1899 return -EFAULT;
1900
1901 hdev = hci_dev_get(dr.dev_id);
1902 if (!hdev)
1903 return -ENODEV;
1904
1905 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1906 err = -EBUSY;
1907 goto done;
1908 }
1909
1910 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1911 err = -EOPNOTSUPP;
1912 goto done;
1913 }
1914
1915 if (hdev->dev_type != HCI_BREDR) {
1916 err = -EOPNOTSUPP;
1917 goto done;
1918 }
1919
1920 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1921 err = -EOPNOTSUPP;
1922 goto done;
1923 }
1924
1925 switch (cmd) {
1926 case HCISETAUTH:
1927 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1928 HCI_INIT_TIMEOUT);
1929 break;
1930
1931 case HCISETENCRYPT:
1932 if (!lmp_encrypt_capable(hdev)) {
1933 err = -EOPNOTSUPP;
1934 break;
1935 }
1936
1937 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1938 /* Auth must be enabled first */
1939 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1940 HCI_INIT_TIMEOUT);
1941 if (err)
1942 break;
1943 }
1944
1945 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1946 HCI_INIT_TIMEOUT);
1947 break;
1948
1949 case HCISETSCAN:
1950 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1951 HCI_INIT_TIMEOUT);
1952
1953 /* Ensure that the connectable and discoverable states
1954 * get correctly modified as this was a non-mgmt change.
1955 */
1956 if (!err)
1957 hci_update_scan_state(hdev, dr.dev_opt);
1958 break;
1959
1960 case HCISETLINKPOL:
1961 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1962 HCI_INIT_TIMEOUT);
1963 break;
1964
1965 case HCISETLINKMODE:
1966 hdev->link_mode = ((__u16) dr.dev_opt) &
1967 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1968 break;
1969
1970 case HCISETPTYPE:
1971 hdev->pkt_type = (__u16) dr.dev_opt;
1972 break;
1973
1974 case HCISETACLMTU:
1975 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1976 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1977 break;
1978
1979 case HCISETSCOMTU:
1980 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1981 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1982 break;
1983
1984 default:
1985 err = -EINVAL;
1986 break;
1987 }
1988
1989 done:
1990 hci_dev_put(hdev);
1991 return err;
1992 }
1993
1994 int hci_get_dev_list(void __user *arg)
1995 {
1996 struct hci_dev *hdev;
1997 struct hci_dev_list_req *dl;
1998 struct hci_dev_req *dr;
1999 int n = 0, size, err;
2000 __u16 dev_num;
2001
2002 if (get_user(dev_num, (__u16 __user *) arg))
2003 return -EFAULT;
2004
2005 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2006 return -EINVAL;
2007
2008 size = sizeof(*dl) + dev_num * sizeof(*dr);
2009
2010 dl = kzalloc(size, GFP_KERNEL);
2011 if (!dl)
2012 return -ENOMEM;
2013
2014 dr = dl->dev_req;
2015
2016 read_lock(&hci_dev_list_lock);
2017 list_for_each_entry(hdev, &hci_dev_list, list) {
2018 unsigned long flags = hdev->flags;
2019
2020 /* When the auto-off is configured it means the transport
2021 * is running, but in that case still indicate that the
2022 * device is actually down.
2023 */
2024 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2025 flags &= ~BIT(HCI_UP);
2026
2027 (dr + n)->dev_id = hdev->id;
2028 (dr + n)->dev_opt = flags;
2029
2030 if (++n >= dev_num)
2031 break;
2032 }
2033 read_unlock(&hci_dev_list_lock);
2034
2035 dl->dev_num = n;
2036 size = sizeof(*dl) + n * sizeof(*dr);
2037
2038 err = copy_to_user(arg, dl, size);
2039 kfree(dl);
2040
2041 return err ? -EFAULT : 0;
2042 }
2043
2044 int hci_get_dev_info(void __user *arg)
2045 {
2046 struct hci_dev *hdev;
2047 struct hci_dev_info di;
2048 unsigned long flags;
2049 int err = 0;
2050
2051 if (copy_from_user(&di, arg, sizeof(di)))
2052 return -EFAULT;
2053
2054 hdev = hci_dev_get(di.dev_id);
2055 if (!hdev)
2056 return -ENODEV;
2057
2058 /* When the auto-off is configured it means the transport
2059 * is running, but in that case still indicate that the
2060 * device is actually down.
2061 */
2062 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2063 flags = hdev->flags & ~BIT(HCI_UP);
2064 else
2065 flags = hdev->flags;
2066
2067 strcpy(di.name, hdev->name);
2068 di.bdaddr = hdev->bdaddr;
2069 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2070 di.flags = flags;
2071 di.pkt_type = hdev->pkt_type;
2072 if (lmp_bredr_capable(hdev)) {
2073 di.acl_mtu = hdev->acl_mtu;
2074 di.acl_pkts = hdev->acl_pkts;
2075 di.sco_mtu = hdev->sco_mtu;
2076 di.sco_pkts = hdev->sco_pkts;
2077 } else {
2078 di.acl_mtu = hdev->le_mtu;
2079 di.acl_pkts = hdev->le_pkts;
2080 di.sco_mtu = 0;
2081 di.sco_pkts = 0;
2082 }
2083 di.link_policy = hdev->link_policy;
2084 di.link_mode = hdev->link_mode;
2085
2086 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2087 memcpy(&di.features, &hdev->features, sizeof(di.features));
2088
2089 if (copy_to_user(arg, &di, sizeof(di)))
2090 err = -EFAULT;
2091
2092 hci_dev_put(hdev);
2093
2094 return err;
2095 }
2096
2097 /* ---- Interface to HCI drivers ---- */
2098
2099 static int hci_rfkill_set_block(void *data, bool blocked)
2100 {
2101 struct hci_dev *hdev = data;
2102
2103 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2104
2105 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2106 return -EBUSY;
2107
2108 if (blocked) {
2109 hci_dev_set_flag(hdev, HCI_RFKILLED);
2110 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2111 !hci_dev_test_flag(hdev, HCI_CONFIG))
2112 hci_dev_do_close(hdev);
2113 } else {
2114 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2115 }
2116
2117 return 0;
2118 }
2119
2120 static const struct rfkill_ops hci_rfkill_ops = {
2121 .set_block = hci_rfkill_set_block,
2122 };
2123
2124 static void hci_power_on(struct work_struct *work)
2125 {
2126 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2127 int err;
2128
2129 BT_DBG("%s", hdev->name);
2130
2131 err = hci_dev_do_open(hdev);
2132 if (err < 0) {
2133 hci_dev_lock(hdev);
2134 mgmt_set_powered_failed(hdev, err);
2135 hci_dev_unlock(hdev);
2136 return;
2137 }
2138
2139 /* During the HCI setup phase, a few error conditions are
2140 * ignored and they need to be checked now. If they are still
2141 * valid, it is important to turn the device back off.
2142 */
2143 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2144 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2145 (hdev->dev_type == HCI_BREDR &&
2146 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2147 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2148 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2149 hci_dev_do_close(hdev);
2150 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2151 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2152 HCI_AUTO_OFF_TIMEOUT);
2153 }
2154
2155 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2156 /* For unconfigured devices, set the HCI_RAW flag
2157 * so that userspace can easily identify them.
2158 */
2159 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2160 set_bit(HCI_RAW, &hdev->flags);
2161
2162 /* For fully configured devices, this will send
2163 * the Index Added event. For unconfigured devices,
2164 * it will send the Unconfigured Index Added event.
2165 *
2166 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2167 * and no event will be sent.
2168 */
2169 mgmt_index_added(hdev);
2170 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2171 /* Now that the controller is configured, it is
2172 * important to clear the HCI_RAW flag.
2173 */
2174 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2175 clear_bit(HCI_RAW, &hdev->flags);
2176
2177 /* Powering on the controller with HCI_CONFIG set only
2178 * happens with the transition from unconfigured to
2179 * configured. This will send the Index Added event.
2180 */
2181 mgmt_index_added(hdev);
2182 }
2183 }
2184
2185 static void hci_power_off(struct work_struct *work)
2186 {
2187 struct hci_dev *hdev = container_of(work, struct hci_dev,
2188 power_off.work);
2189
2190 BT_DBG("%s", hdev->name);
2191
2192 hci_dev_do_close(hdev);
2193 }
2194
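/* Recover from a controller hardware error: give the driver a chance
 * to handle the error code first, then power-cycle the controller by
 * closing and re-opening it.
 */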
2195 static void hci_error_reset(struct work_struct *work)
2196 {
2197 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2198
2199 BT_DBG("%s", hdev->name);
2200
2201 if (hdev->hw_error)
2202 hdev->hw_error(hdev, hdev->hw_error_code);
2203 else
2204 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2205 hdev->hw_error_code);
2206
2207 if (hci_dev_do_close(hdev))
2208 return;
2209
2210 hci_dev_do_open(hdev);
2211 }
2212
2213 static void hci_discov_off(struct work_struct *work)
2214 {
2215 struct hci_dev *hdev;
2216
2217 hdev = container_of(work, struct hci_dev, discov_off.work);
2218
2219 BT_DBG("%s", hdev->name);
2220
2221 mgmt_discoverable_timeout(hdev);
2222 }
2223
2224 static void hci_adv_timeout_expire(struct work_struct *work)
2225 {
2226 struct hci_dev *hdev;
2227
2228 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2229
2230 BT_DBG("%s", hdev->name);
2231
2232 mgmt_adv_timeout_expired(hdev);
2233 }
2234
2235 void hci_uuids_clear(struct hci_dev *hdev)
2236 {
2237 struct bt_uuid *uuid, *tmp;
2238
2239 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2240 list_del(&uuid->list);
2241 kfree(uuid);
2242 }
2243 }
2244
2245 void hci_link_keys_clear(struct hci_dev *hdev)
2246 {
2247 struct link_key *key;
2248
2249 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2250 list_del_rcu(&key->list);
2251 kfree_rcu(key, rcu);
2252 }
2253 }
2254
2255 void hci_smp_ltks_clear(struct hci_dev *hdev)
2256 {
2257 struct smp_ltk *k;
2258
2259 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2260 list_del_rcu(&k->list);
2261 kfree_rcu(k, rcu);
2262 }
2263 }
2264
2265 void hci_smp_irks_clear(struct hci_dev *hdev)
2266 {
2267 struct smp_irk *k;
2268
2269 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2270 list_del_rcu(&k->list);
2271 kfree_rcu(k, rcu);
2272 }
2273 }
2274
2275 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2276 {
2277 struct link_key *k;
2278
2279 rcu_read_lock();
2280 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2281 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2282 rcu_read_unlock();
2283 return k;
2284 }
2285 }
2286 rcu_read_unlock();
2287
2288 return NULL;
2289 }
2290
2291 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2292 u8 key_type, u8 old_key_type)
2293 {
2294 /* Legacy key */
2295 if (key_type < 0x03)
2296 return true;
2297
2298 /* Debug keys are insecure so don't store them persistently */
2299 if (key_type == HCI_LK_DEBUG_COMBINATION)
2300 return false;
2301
2302 /* Changed combination key and there's no previous one */
2303 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2304 return false;
2305
2306 /* Security mode 3 case */
2307 if (!conn)
2308 return true;
2309
2310 /* BR/EDR key derived using SC from an LE link */
2311 if (conn->type == LE_LINK)
2312 return true;
2313
2314 /* Neither local nor remote side had no-bonding as requirement */
2315 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2316 return true;
2317
2318 /* Local side had dedicated bonding as requirement */
2319 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2320 return true;
2321
2322 /* Remote side had dedicated bonding as requirement */
2323 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2324 return true;
2325
2326 /* If none of the above criteria match, then don't store the key
2327 * persistently */
2328 return false;
2329 }
2330
2331 static u8 ltk_role(u8 type)
2332 {
2333 if (type == SMP_LTK)
2334 return HCI_ROLE_MASTER;
2335
2336 return HCI_ROLE_SLAVE;
2337 }
2338
2339 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2340 u8 addr_type, u8 role)
2341 {
2342 struct smp_ltk *k;
2343
2344 rcu_read_lock();
2345 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2346 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2347 continue;
2348
2349 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2350 rcu_read_unlock();
2351 return k;
2352 }
2353 }
2354 rcu_read_unlock();
2355
2356 return NULL;
2357 }
2358
2359 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2360 {
2361 struct smp_irk *irk;
2362
2363 rcu_read_lock();
2364 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2365 if (!bacmp(&irk->rpa, rpa)) {
2366 rcu_read_unlock();
2367 return irk;
2368 }
2369 }
2370
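/* No cached RPA matched; try to resolve the RPA against each
 * stored IRK and cache the result on success.
 */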
2371 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2372 if (smp_irk_matches(hdev, irk->val, rpa)) {
2373 bacpy(&irk->rpa, rpa);
2374 rcu_read_unlock();
2375 return irk;
2376 }
2377 }
2378 rcu_read_unlock();
2379
2380 return NULL;
2381 }
2382
2383 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384 u8 addr_type)
2385 {
2386 struct smp_irk *irk;
2387
2388 /* Identity Address must be public or static random */
2389 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2390 return NULL;
2391
2392 rcu_read_lock();
2393 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2394 if (addr_type == irk->addr_type &&
2395 bacmp(bdaddr, &irk->bdaddr) == 0) {
2396 rcu_read_unlock();
2397 return irk;
2398 }
2399 }
2400 rcu_read_unlock();
2401
2402 return NULL;
2403 }
2404
2405 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2406 bdaddr_t *bdaddr, u8 *val, u8 type,
2407 u8 pin_len, bool *persistent)
2408 {
2409 struct link_key *key, *old_key;
2410 u8 old_key_type;
2411
2412 old_key = hci_find_link_key(hdev, bdaddr);
2413 if (old_key) {
2414 old_key_type = old_key->type;
2415 key = old_key;
2416 } else {
2417 old_key_type = conn ? conn->key_type : 0xff;
2418 key = kzalloc(sizeof(*key), GFP_KERNEL);
2419 if (!key)
2420 return NULL;
2421 list_add_rcu(&key->list, &hdev->link_keys);
2422 }
2423
2424 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2425
2426 /* Some buggy controller combinations generate a changed
2427 * combination key for legacy pairing even when there's no
2428 * previous key */
2429 if (type == HCI_LK_CHANGED_COMBINATION &&
2430 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2431 type = HCI_LK_COMBINATION;
2432 if (conn)
2433 conn->key_type = type;
2434 }
2435
2436 bacpy(&key->bdaddr, bdaddr);
2437 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2438 key->pin_len = pin_len;
2439
2440 if (type == HCI_LK_CHANGED_COMBINATION)
2441 key->type = old_key_type;
2442 else
2443 key->type = type;
2444
2445 if (persistent)
2446 *persistent = hci_persistent_key(hdev, conn, type,
2447 old_key_type);
2448
2449 return key;
2450 }
2451
2452 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2453 u8 addr_type, u8 type, u8 authenticated,
2454 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2455 {
2456 struct smp_ltk *key, *old_key;
2457 u8 role = ltk_role(type);
2458
2459 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2460 if (old_key)
2461 key = old_key;
2462 else {
2463 key = kzalloc(sizeof(*key), GFP_KERNEL);
2464 if (!key)
2465 return NULL;
2466 list_add_rcu(&key->list, &hdev->long_term_keys);
2467 }
2468
2469 bacpy(&key->bdaddr, bdaddr);
2470 key->bdaddr_type = addr_type;
2471 memcpy(key->val, tk, sizeof(key->val));
2472 key->authenticated = authenticated;
2473 key->ediv = ediv;
2474 key->rand = rand;
2475 key->enc_size = enc_size;
2476 key->type = type;
2477
2478 return key;
2479 }
2480
2481 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2482 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2483 {
2484 struct smp_irk *irk;
2485
2486 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2487 if (!irk) {
2488 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2489 if (!irk)
2490 return NULL;
2491
2492 bacpy(&irk->bdaddr, bdaddr);
2493 irk->addr_type = addr_type;
2494
2495 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2496 }
2497
2498 memcpy(irk->val, val, 16);
2499 bacpy(&irk->rpa, rpa);
2500
2501 return irk;
2502 }
2503
2504 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2505 {
2506 struct link_key *key;
2507
2508 key = hci_find_link_key(hdev, bdaddr);
2509 if (!key)
2510 return -ENOENT;
2511
2512 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2513
2514 list_del_rcu(&key->list);
2515 kfree_rcu(key, rcu);
2516
2517 return 0;
2518 }
2519
2520 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2521 {
2522 struct smp_ltk *k;
2523 int removed = 0;
2524
2525 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2526 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2527 continue;
2528
2529 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2530
2531 list_del_rcu(&k->list);
2532 kfree_rcu(k, rcu);
2533 removed++;
2534 }
2535
2536 return removed ? 0 : -ENOENT;
2537 }
2538
2539 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2540 {
2541 struct smp_irk *k;
2542
2543 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2544 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2545 continue;
2546
2547 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2548
2549 list_del_rcu(&k->list);
2550 kfree_rcu(k, rcu);
2551 }
2552 }
2553
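/* Check whether any bonding material is stored for the given address:
 * a link key for BR/EDR, or an LTK for LE (resolving the address
 * through a matching IRK first, if one is known).
 */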
2554 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2555 {
2556 struct smp_ltk *k;
2557 struct smp_irk *irk;
2558 u8 addr_type;
2559
2560 if (type == BDADDR_BREDR) {
2561 if (hci_find_link_key(hdev, bdaddr))
2562 return true;
2563 return false;
2564 }
2565
2566 /* Convert to HCI addr type which struct smp_ltk uses */
2567 if (type == BDADDR_LE_PUBLIC)
2568 addr_type = ADDR_LE_DEV_PUBLIC;
2569 else
2570 addr_type = ADDR_LE_DEV_RANDOM;
2571
2572 irk = hci_get_irk(hdev, bdaddr, addr_type);
2573 if (irk) {
2574 bdaddr = &irk->bdaddr;
2575 addr_type = irk->addr_type;
2576 }
2577
2578 rcu_read_lock();
2579 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2580 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2581 rcu_read_unlock();
2582 return true;
2583 }
2584 }
2585 rcu_read_unlock();
2586
2587 return false;
2588 }
2589
2590 /* HCI command timer function */
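/* Runs when a queued command has not completed in time: log the
 * opcode of the outstanding command (if any), restore one command
 * credit and kick the command work so the queue can make progress.
 */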
2591 static void hci_cmd_timeout(struct work_struct *work)
2592 {
2593 struct hci_dev *hdev = container_of(work, struct hci_dev,
2594 cmd_timer.work);
2595
2596 if (hdev->sent_cmd) {
2597 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2598 u16 opcode = __le16_to_cpu(sent->opcode);
2599
2600 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2601 } else {
2602 BT_ERR("%s command tx timeout", hdev->name);
2603 }
2604
2605 atomic_set(&hdev->cmd_cnt, 1);
2606 queue_work(hdev->workqueue, &hdev->cmd_work);
2607 }
2608
2609 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2610 bdaddr_t *bdaddr, u8 bdaddr_type)
2611 {
2612 struct oob_data *data;
2613
2614 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2615 if (bacmp(bdaddr, &data->bdaddr) != 0)
2616 continue;
2617 if (data->bdaddr_type != bdaddr_type)
2618 continue;
2619 return data;
2620 }
2621
2622 return NULL;
2623 }
2624
2625 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2626 u8 bdaddr_type)
2627 {
2628 struct oob_data *data;
2629
2630 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2631 if (!data)
2632 return -ENOENT;
2633
2634 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2635
2636 list_del(&data->list);
2637 kfree(data);
2638
2639 return 0;
2640 }
2641
2642 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2643 {
2644 struct oob_data *data, *n;
2645
2646 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2647 list_del(&data->list);
2648 kfree(data);
2649 }
2650 }
2651
2652 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2653 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2654 u8 *hash256, u8 *rand256)
2655 {
2656 struct oob_data *data;
2657
2658 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2659 if (!data) {
2660 data = kmalloc(sizeof(*data), GFP_KERNEL);
2661 if (!data)
2662 return -ENOMEM;
2663
2664 bacpy(&data->bdaddr, bdaddr);
2665 data->bdaddr_type = bdaddr_type;
2666 list_add(&data->list, &hdev->remote_oob_data);
2667 }
2668
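/* data->present records which OOB values are valid: 0x01 for the
 * P-192 values only, 0x02 for the P-256 values only, 0x03 for both
 * and 0x00 for none.
 */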
2669 if (hash192 && rand192) {
2670 memcpy(data->hash192, hash192, sizeof(data->hash192));
2671 memcpy(data->rand192, rand192, sizeof(data->rand192));
2672 if (hash256 && rand256)
2673 data->present = 0x03;
2674 } else {
2675 memset(data->hash192, 0, sizeof(data->hash192));
2676 memset(data->rand192, 0, sizeof(data->rand192));
2677 if (hash256 && rand256)
2678 data->present = 0x02;
2679 else
2680 data->present = 0x00;
2681 }
2682
2683 if (hash256 && rand256) {
2684 memcpy(data->hash256, hash256, sizeof(data->hash256));
2685 memcpy(data->rand256, rand256, sizeof(data->rand256));
2686 } else {
2687 memset(data->hash256, 0, sizeof(data->hash256));
2688 memset(data->rand256, 0, sizeof(data->rand256));
2689 if (hash192 && rand192)
2690 data->present = 0x01;
2691 }
2692
2693 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2694
2695 return 0;
2696 }
2697
2698 /* This function requires the caller holds hdev->lock */
2699 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2700 {
2701 struct adv_info *adv_instance;
2702
2703 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2704 if (adv_instance->instance == instance)
2705 return adv_instance;
2706 }
2707
2708 return NULL;
2709 }
2710
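/* Return the advertising instance that follows the given one in the
 * list, wrapping around to the first entry, or NULL if the given
 * instance does not exist.
 */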
2711 /* This function requires the caller holds hdev->lock */
2712 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2713 struct adv_info *cur_instance;
2714
2715 cur_instance = hci_find_adv_instance(hdev, instance);
2716 if (!cur_instance)
2717 return NULL;
2718
2719 if (cur_instance == list_last_entry(&hdev->adv_instances,
2720 struct adv_info, list))
2721 return list_first_entry(&hdev->adv_instances,
2722 struct adv_info, list);
2723 else
2724 return list_next_entry(cur_instance, list);
2725 }
2726
2727 /* This function requires the caller holds hdev->lock */
2728 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2729 {
2730 struct adv_info *adv_instance;
2731
2732 adv_instance = hci_find_adv_instance(hdev, instance);
2733 if (!adv_instance)
2734 return -ENOENT;
2735
2736 BT_DBG("%s removing %d", hdev->name, instance);
2737
2738 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2739 cancel_delayed_work(&hdev->adv_instance_expire);
2740 hdev->adv_instance_timeout = 0;
2741 }
2742
2743 list_del(&adv_instance->list);
2744 kfree(adv_instance);
2745
2746 hdev->adv_instance_cnt--;
2747
2748 return 0;
2749 }
2750
2751 /* This function requires the caller holds hdev->lock */
2752 void hci_adv_instances_clear(struct hci_dev *hdev)
2753 {
2754 struct adv_info *adv_instance, *n;
2755
2756 if (hdev->adv_instance_timeout) {
2757 cancel_delayed_work(&hdev->adv_instance_expire);
2758 hdev->adv_instance_timeout = 0;
2759 }
2760
2761 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2762 list_del(&adv_instance->list);
2763 kfree(adv_instance);
2764 }
2765
2766 hdev->adv_instance_cnt = 0;
2767 }
2768
2769 /* This function requires the caller holds hdev->lock */
2770 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2771 u16 adv_data_len, u8 *adv_data,
2772 u16 scan_rsp_len, u8 *scan_rsp_data,
2773 u16 timeout, u16 duration)
2774 {
2775 struct adv_info *adv_instance;
2776
2777 adv_instance = hci_find_adv_instance(hdev, instance);
2778 if (adv_instance) {
2779 memset(adv_instance->adv_data, 0,
2780 sizeof(adv_instance->adv_data));
2781 memset(adv_instance->scan_rsp_data, 0,
2782 sizeof(adv_instance->scan_rsp_data));
2783 } else {
2784 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2785 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2786 return -EOVERFLOW;
2787
2788 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2789 if (!adv_instance)
2790 return -ENOMEM;
2791
2792 adv_instance->pending = true;
2793 adv_instance->instance = instance;
2794 list_add(&adv_instance->list, &hdev->adv_instances);
2795 hdev->adv_instance_cnt++;
2796 }
2797
2798 adv_instance->flags = flags;
2799 adv_instance->adv_data_len = adv_data_len;
2800 adv_instance->scan_rsp_len = scan_rsp_len;
2801
2802 if (adv_data_len)
2803 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2804
2805 if (scan_rsp_len)
2806 memcpy(adv_instance->scan_rsp_data,
2807 scan_rsp_data, scan_rsp_len);
2808
2809 adv_instance->timeout = timeout;
2810 adv_instance->remaining_time = timeout;
2811
2812 if (duration == 0)
2813 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2814 else
2815 adv_instance->duration = duration;
2816
2817 BT_DBG("%s for %d", hdev->name, instance);
2818
2819 return 0;
2820 }
2821
2822 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2823 bdaddr_t *bdaddr, u8 type)
2824 {
2825 struct bdaddr_list *b;
2826
2827 list_for_each_entry(b, bdaddr_list, list) {
2828 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2829 return b;
2830 }
2831
2832 return NULL;
2833 }
2834
2835 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2836 {
2837 struct list_head *p, *n;
2838
2839 list_for_each_safe(p, n, bdaddr_list) {
2840 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2841
2842 list_del(p);
2843 kfree(b);
2844 }
2845 }
2846
2847 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2848 {
2849 struct bdaddr_list *entry;
2850
2851 if (!bacmp(bdaddr, BDADDR_ANY))
2852 return -EBADF;
2853
2854 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2855 return -EEXIST;
2856
2857 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2858 if (!entry)
2859 return -ENOMEM;
2860
2861 bacpy(&entry->bdaddr, bdaddr);
2862 entry->bdaddr_type = type;
2863
2864 list_add(&entry->list, list);
2865
2866 return 0;
2867 }
2868
2869 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2870 {
2871 struct bdaddr_list *entry;
2872
2873 if (!bacmp(bdaddr, BDADDR_ANY)) {
2874 hci_bdaddr_list_clear(list);
2875 return 0;
2876 }
2877
2878 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2879 if (!entry)
2880 return -ENOENT;
2881
2882 list_del(&entry->list);
2883 kfree(entry);
2884
2885 return 0;
2886 }
2887
2888 /* This function requires the caller holds hdev->lock */
2889 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2890 bdaddr_t *addr, u8 addr_type)
2891 {
2892 struct hci_conn_params *params;
2893
2894 list_for_each_entry(params, &hdev->le_conn_params, list) {
2895 if (bacmp(&params->addr, addr) == 0 &&
2896 params->addr_type == addr_type) {
2897 return params;
2898 }
2899 }
2900
2901 return NULL;
2902 }
2903
2904 /* This function requires the caller holds hdev->lock */
2905 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2906 bdaddr_t *addr, u8 addr_type)
2907 {
2908 struct hci_conn_params *param;
2909
2910 list_for_each_entry(param, list, action) {
2911 if (bacmp(&param->addr, addr) == 0 &&
2912 param->addr_type == addr_type)
2913 return param;
2914 }
2915
2916 return NULL;
2917 }
2918
2919 /* This function requires the caller holds hdev->lock */
2920 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2921 bdaddr_t *addr,
2922 u8 addr_type)
2923 {
2924 struct hci_conn_params *param;
2925
2926 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2927 if (bacmp(&param->addr, addr) == 0 &&
2928 param->addr_type == addr_type &&
2929 param->explicit_connect)
2930 return param;
2931 }
2932
2933 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2934 if (bacmp(&param->addr, addr) == 0 &&
2935 param->addr_type == addr_type &&
2936 param->explicit_connect)
2937 return param;
2938 }
2939
2940 return NULL;
2941 }
2942
2943 /* This function requires the caller holds hdev->lock */
2944 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2945 bdaddr_t *addr, u8 addr_type)
2946 {
2947 struct hci_conn_params *params;
2948
2949 params = hci_conn_params_lookup(hdev, addr, addr_type);
2950 if (params)
2951 return params;
2952
2953 params = kzalloc(sizeof(*params), GFP_KERNEL);
2954 if (!params) {
2955 BT_ERR("Out of memory");
2956 return NULL;
2957 }
2958
2959 bacpy(&params->addr, addr);
2960 params->addr_type = addr_type;
2961
2962 list_add(&params->list, &hdev->le_conn_params);
2963 INIT_LIST_HEAD(&params->action);
2964
2965 params->conn_min_interval = hdev->le_conn_min_interval;
2966 params->conn_max_interval = hdev->le_conn_max_interval;
2967 params->conn_latency = hdev->le_conn_latency;
2968 params->supervision_timeout = hdev->le_supv_timeout;
2969 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2970
2971 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2972
2973 return params;
2974 }
2975
2976 static void hci_conn_params_free(struct hci_conn_params *params)
2977 {
2978 if (params->conn) {
2979 hci_conn_drop(params->conn);
2980 hci_conn_put(params->conn);
2981 }
2982
2983 list_del(&params->action);
2984 list_del(&params->list);
2985 kfree(params);
2986 }
2987
2988 /* This function requires the caller holds hdev->lock */
2989 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2990 {
2991 struct hci_conn_params *params;
2992
2993 params = hci_conn_params_lookup(hdev, addr, addr_type);
2994 if (!params)
2995 return;
2996
2997 hci_conn_params_free(params);
2998
2999 hci_update_background_scan(hdev);
3000
3001 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3002 }
3003
3004 /* This function requires the caller holds hdev->lock */
3005 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3006 {
3007 struct hci_conn_params *params, *tmp;
3008
3009 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3010 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3011 continue;
3012
3013 /* If trying to establish a one-time connection to a disabled
3014 * device, leave the params but mark them as one-time only.
3015 */
3016 if (params->explicit_connect) {
3017 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3018 continue;
3019 }
3020
3021 list_del(&params->list);
3022 kfree(params);
3023 }
3024
3025 BT_DBG("All LE disabled connection parameters were removed");
3026 }
3027
3028 /* This function requires the caller holds hdev->lock */
3029 void hci_conn_params_clear_all(struct hci_dev *hdev)
3030 {
3031 struct hci_conn_params *params, *tmp;
3032
3033 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3034 hci_conn_params_free(params);
3035
3036 hci_update_background_scan(hdev);
3037
3038 BT_DBG("All LE connection parameters were removed");
3039 }
3040
3041 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3042 {
3043 if (status) {
3044 BT_ERR("Failed to start inquiry: status %d", status);
3045
3046 hci_dev_lock(hdev);
3047 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3048 hci_dev_unlock(hdev);
3049 return;
3050 }
3051 }
3052
3053 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3054 u16 opcode)
3055 {
3056 /* General inquiry access code (GIAC) */
3057 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3058 struct hci_cp_inquiry cp;
3059 int err;
3060
3061 if (status) {
3062 BT_ERR("Failed to disable LE scanning: status %d", status);
3063 return;
3064 }
3065
3066 hdev->discovery.scan_start = 0;
3067
3068 switch (hdev->discovery.type) {
3069 case DISCOV_TYPE_LE:
3070 hci_dev_lock(hdev);
3071 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3072 hci_dev_unlock(hdev);
3073 break;
3074
3075 case DISCOV_TYPE_INTERLEAVED:
3076 hci_dev_lock(hdev);
3077
3078 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3079 &hdev->quirks)) {
3080 /* If we were running LE only scan, change discovery
3081 * state. If we were running both LE and BR/EDR inquiry
3082 * simultaneously, and BR/EDR inquiry is already
3083 * finished, stop discovery, otherwise BR/EDR inquiry
3084 * will stop discovery when finished. If we will resolve
3085 * remote device name, do not change discovery state.
3086 */
3087 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3088 hdev->discovery.state != DISCOVERY_RESOLVING)
3089 hci_discovery_set_state(hdev,
3090 DISCOVERY_STOPPED);
3091 } else {
3092 struct hci_request req;
3093
3094 hci_inquiry_cache_flush(hdev);
3095
3096 hci_req_init(&req, hdev);
3097
3098 memset(&cp, 0, sizeof(cp));
3099 memcpy(&cp.lap, lap, sizeof(cp.lap));
3100 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3101 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3102
3103 err = hci_req_run(&req, inquiry_complete);
3104 if (err) {
3105 BT_ERR("Inquiry request failed: err %d", err);
3106 hci_discovery_set_state(hdev,
3107 DISCOVERY_STOPPED);
3108 }
3109 }
3110
3111 hci_dev_unlock(hdev);
3112 break;
3113 }
3114 }
3115
3116 static void le_scan_disable_work(struct work_struct *work)
3117 {
3118 struct hci_dev *hdev = container_of(work, struct hci_dev,
3119 le_scan_disable.work);
3120 struct hci_request req;
3121 int err;
3122
3123 BT_DBG("%s", hdev->name);
3124
3125 cancel_delayed_work_sync(&hdev->le_scan_restart);
3126
3127 hci_req_init(&req, hdev);
3128
3129 hci_req_add_le_scan_disable(&req);
3130
3131 err = hci_req_run(&req, le_scan_disable_work_complete);
3132 if (err)
3133 BT_ERR("Disable LE scanning request failed: err %d", err);
3134 }
3135
3136 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3137 u16 opcode)
3138 {
3139 unsigned long timeout, duration, scan_start, now;
3140
3141 BT_DBG("%s", hdev->name);
3142
3143 if (status) {
3144 BT_ERR("Failed to restart LE scan: status %d", status);
3145 return;
3146 }
3147
3148 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3149 !hdev->discovery.scan_start)
3150 return;
3151
3152 /* When the scan was started, hdev->le_scan_disable has been queued
3153 * to run 'duration' after scan_start. During the scan restart this
3154 * job was canceled, so queue it again with the remaining timeout
3155 * to make sure that the scan does not run indefinitely.
3156 */
3157 duration = hdev->discovery.scan_duration;
3158 scan_start = hdev->discovery.scan_start;
3159 now = jiffies;
3160 if (now - scan_start <= duration) {
3161 int elapsed;
3162
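/* Compute the elapsed time, allowing for a jiffies wraparound
 * between scan_start and now.
 */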
3163 if (now >= scan_start)
3164 elapsed = now - scan_start;
3165 else
3166 elapsed = ULONG_MAX - scan_start + now;
3167
3168 timeout = duration - elapsed;
3169 } else {
3170 timeout = 0;
3171 }
3172 queue_delayed_work(hdev->workqueue,
3173 &hdev->le_scan_disable, timeout);
3174 }
3175
3176 static void le_scan_restart_work(struct work_struct *work)
3177 {
3178 struct hci_dev *hdev = container_of(work, struct hci_dev,
3179 le_scan_restart.work);
3180 struct hci_request req;
3181 struct hci_cp_le_set_scan_enable cp;
3182 int err;
3183
3184 BT_DBG("%s", hdev->name);
3185
3186 /* If controller is not scanning we are done. */
3187 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3188 return;
3189
3190 hci_req_init(&req, hdev);
3191
3192 hci_req_add_le_scan_disable(&req);
3193
3194 memset(&cp, 0, sizeof(cp));
3195 cp.enable = LE_SCAN_ENABLE;
3196 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3197 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3198
3199 err = hci_req_run(&req, le_scan_restart_work_complete);
3200 if (err)
3201 BT_ERR("Restart LE scan request failed: err %d", err);
3202 }
3203
3204 /* Copy the Identity Address of the controller.
3205 *
3206 * If the controller has a public BD_ADDR, then by default use that one.
3207 * If this is a LE only controller without a public address, default to
3208 * the static random address.
3209 *
3210 * For debugging purposes it is possible to force controllers with a
3211 * public address to use the static random address instead.
3212 *
3213 * In case BR/EDR has been disabled on a dual-mode controller and
3214 * userspace has configured a static address, then that address
3215 * becomes the identity address instead of the public BR/EDR address.
3216 */
3217 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3218 u8 *bdaddr_type)
3219 {
3220 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3221 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3222 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3223 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3224 bacpy(bdaddr, &hdev->static_addr);
3225 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3226 } else {
3227 bacpy(bdaddr, &hdev->bdaddr);
3228 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3229 }
3230 }
3231
3232 /* Alloc HCI device */
3233 struct hci_dev *hci_alloc_dev(void)
3234 {
3235 struct hci_dev *hdev;
3236
3237 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3238 if (!hdev)
3239 return NULL;
3240
3241 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3242 hdev->esco_type = (ESCO_HV1);
3243 hdev->link_mode = (HCI_LM_ACCEPT);
3244 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3245 hdev->io_capability = 0x03; /* No Input No Output */
3246 hdev->manufacturer = 0xffff; /* Default to internal use */
3247 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3248 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3249 hdev->adv_instance_cnt = 0;
3250 hdev->cur_adv_instance = 0x00;
3251 hdev->adv_instance_timeout = 0;
3252
3253 hdev->sniff_max_interval = 800;
3254 hdev->sniff_min_interval = 80;
3255
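/* LE defaults: advertising and scan timing values are in units of
 * 0.625 ms, connection intervals in units of 1.25 ms and the
 * supervision timeout in units of 10 ms.
 */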
3256 hdev->le_adv_channel_map = 0x07;
3257 hdev->le_adv_min_interval = 0x0800;
3258 hdev->le_adv_max_interval = 0x0800;
3259 hdev->le_scan_interval = 0x0060;
3260 hdev->le_scan_window = 0x0030;
3261 hdev->le_conn_min_interval = 0x0028;
3262 hdev->le_conn_max_interval = 0x0038;
3263 hdev->le_conn_latency = 0x0000;
3264 hdev->le_supv_timeout = 0x002a;
3265 hdev->le_def_tx_len = 0x001b;
3266 hdev->le_def_tx_time = 0x0148;
3267 hdev->le_max_tx_len = 0x001b;
3268 hdev->le_max_tx_time = 0x0148;
3269 hdev->le_max_rx_len = 0x001b;
3270 hdev->le_max_rx_time = 0x0148;
3271
3272 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3273 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3274 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3275 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3276
3277 mutex_init(&hdev->lock);
3278 mutex_init(&hdev->req_lock);
3279
3280 INIT_LIST_HEAD(&hdev->mgmt_pending);
3281 INIT_LIST_HEAD(&hdev->blacklist);
3282 INIT_LIST_HEAD(&hdev->whitelist);
3283 INIT_LIST_HEAD(&hdev->uuids);
3284 INIT_LIST_HEAD(&hdev->link_keys);
3285 INIT_LIST_HEAD(&hdev->long_term_keys);
3286 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3287 INIT_LIST_HEAD(&hdev->remote_oob_data);
3288 INIT_LIST_HEAD(&hdev->le_white_list);
3289 INIT_LIST_HEAD(&hdev->le_conn_params);
3290 INIT_LIST_HEAD(&hdev->pend_le_conns);
3291 INIT_LIST_HEAD(&hdev->pend_le_reports);
3292 INIT_LIST_HEAD(&hdev->conn_hash.list);
3293 INIT_LIST_HEAD(&hdev->adv_instances);
3294
3295 INIT_WORK(&hdev->rx_work, hci_rx_work);
3296 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3297 INIT_WORK(&hdev->tx_work, hci_tx_work);
3298 INIT_WORK(&hdev->power_on, hci_power_on);
3299 INIT_WORK(&hdev->error_reset, hci_error_reset);
3300
3301 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3302 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3303 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3304 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3305 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3306
3307 skb_queue_head_init(&hdev->rx_q);
3308 skb_queue_head_init(&hdev->cmd_q);
3309 skb_queue_head_init(&hdev->raw_q);
3310
3311 init_waitqueue_head(&hdev->req_wait_q);
3312
3313 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3314
3315 hci_init_sysfs(hdev);
3316 discovery_init(hdev);
3317
3318 return hdev;
3319 }
3320 EXPORT_SYMBOL(hci_alloc_dev);
3321
3322 /* Free HCI device */
3323 void hci_free_dev(struct hci_dev *hdev)
3324 {
3325 /* will free via device release */
3326 put_device(&hdev->dev);
3327 }
3328 EXPORT_SYMBOL(hci_free_dev);
3329
3330 /* Register HCI device */
3331 int hci_register_dev(struct hci_dev *hdev)
3332 {
3333 int id, error;
3334
3335 if (!hdev->open || !hdev->close || !hdev->send)
3336 return -EINVAL;
3337
3338 /* Do not allow HCI_AMP devices to register at index 0,
3339 * so the index can be used as the AMP controller ID.
3340 */
3341 switch (hdev->dev_type) {
3342 case HCI_BREDR:
3343 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3344 break;
3345 case HCI_AMP:
3346 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3347 break;
3348 default:
3349 return -EINVAL;
3350 }
3351
3352 if (id < 0)
3353 return id;
3354
3355 sprintf(hdev->name, "hci%d", id);
3356 hdev->id = id;
3357
3358 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3359
3360 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3361 WQ_MEM_RECLAIM, 1, hdev->name);
3362 if (!hdev->workqueue) {
3363 error = -ENOMEM;
3364 goto err;
3365 }
3366
3367 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3368 WQ_MEM_RECLAIM, 1, hdev->name);
3369 if (!hdev->req_workqueue) {
3370 destroy_workqueue(hdev->workqueue);
3371 error = -ENOMEM;
3372 goto err;
3373 }
3374
3375 if (!IS_ERR_OR_NULL(bt_debugfs))
3376 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3377
3378 dev_set_name(&hdev->dev, "%s", hdev->name);
3379
3380 error = device_add(&hdev->dev);
3381 if (error < 0)
3382 goto err_wqueue;
3383
3384 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3385 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3386 hdev);
3387 if (hdev->rfkill) {
3388 if (rfkill_register(hdev->rfkill) < 0) {
3389 rfkill_destroy(hdev->rfkill);
3390 hdev->rfkill = NULL;
3391 }
3392 }
3393
3394 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3395 hci_dev_set_flag(hdev, HCI_RFKILLED);
3396
3397 hci_dev_set_flag(hdev, HCI_SETUP);
3398 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3399
3400 if (hdev->dev_type == HCI_BREDR) {
3401 /* Assume BR/EDR support until proven otherwise (such as
3402 * through reading supported features during init).
3403 */
3404 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3405 }
3406
3407 write_lock(&hci_dev_list_lock);
3408 list_add(&hdev->list, &hci_dev_list);
3409 write_unlock(&hci_dev_list_lock);
3410
3411 /* Devices that are marked for raw-only usage are unconfigured
3412 * and should not be included in normal operation.
3413 */
3414 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3415 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3416
3417 hci_notify(hdev, HCI_DEV_REG);
3418 hci_dev_hold(hdev);
3419
3420 queue_work(hdev->req_workqueue, &hdev->power_on);
3421
3422 return id;
3423
3424 err_wqueue:
3425 destroy_workqueue(hdev->workqueue);
3426 destroy_workqueue(hdev->req_workqueue);
3427 err:
3428 ida_simple_remove(&hci_index_ida, hdev->id);
3429
3430 return error;
3431 }
3432 EXPORT_SYMBOL(hci_register_dev);
3433
3434 /* Unregister HCI device */
3435 void hci_unregister_dev(struct hci_dev *hdev)
3436 {
3437 int id;
3438
3439 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3440
3441 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3442
3443 id = hdev->id;
3444
3445 write_lock(&hci_dev_list_lock);
3446 list_del(&hdev->list);
3447 write_unlock(&hci_dev_list_lock);
3448
3449 hci_dev_do_close(hdev);
3450
3451 cancel_work_sync(&hdev->power_on);
3452
3453 if (!test_bit(HCI_INIT, &hdev->flags) &&
3454 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3455 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3456 hci_dev_lock(hdev);
3457 mgmt_index_removed(hdev);
3458 hci_dev_unlock(hdev);
3459 }
3460
3461 /* mgmt_index_removed should take care of emptying the
3462 * pending list */
3463 BUG_ON(!list_empty(&hdev->mgmt_pending));
3464
3465 hci_notify(hdev, HCI_DEV_UNREG);
3466
3467 if (hdev->rfkill) {
3468 rfkill_unregister(hdev->rfkill);
3469 rfkill_destroy(hdev->rfkill);
3470 }
3471
3472 device_del(&hdev->dev);
3473
3474 debugfs_remove_recursive(hdev->debugfs);
3475
3476 destroy_workqueue(hdev->workqueue);
3477 destroy_workqueue(hdev->req_workqueue);
3478
3479 hci_dev_lock(hdev);
3480 hci_bdaddr_list_clear(&hdev->blacklist);
3481 hci_bdaddr_list_clear(&hdev->whitelist);
3482 hci_uuids_clear(hdev);
3483 hci_link_keys_clear(hdev);
3484 hci_smp_ltks_clear(hdev);
3485 hci_smp_irks_clear(hdev);
3486 hci_remote_oob_data_clear(hdev);
3487 hci_adv_instances_clear(hdev);
3488 hci_bdaddr_list_clear(&hdev->le_white_list);
3489 hci_conn_params_clear_all(hdev);
3490 hci_discovery_filter_clear(hdev);
3491 hci_dev_unlock(hdev);
3492
3493 hci_dev_put(hdev);
3494
3495 ida_simple_remove(&hci_index_ida, id);
3496 }
3497 EXPORT_SYMBOL(hci_unregister_dev);
3498
3499 /* Suspend HCI device */
3500 int hci_suspend_dev(struct hci_dev *hdev)
3501 {
3502 hci_notify(hdev, HCI_DEV_SUSPEND);
3503 return 0;
3504 }
3505 EXPORT_SYMBOL(hci_suspend_dev);
3506
3507 /* Resume HCI device */
3508 int hci_resume_dev(struct hci_dev *hdev)
3509 {
3510 hci_notify(hdev, HCI_DEV_RESUME);
3511 return 0;
3512 }
3513 EXPORT_SYMBOL(hci_resume_dev);
3514
3515 /* Reset HCI device */
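/* Inject a synthetic Hardware Error event into the receive path so
 * that the event handling code can run the stack's normal
 * hardware-error recovery (see hci_error_reset() above).
 */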
3516 int hci_reset_dev(struct hci_dev *hdev)
3517 {
3518 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3519 struct sk_buff *skb;
3520
3521 skb = bt_skb_alloc(3, GFP_ATOMIC);
3522 if (!skb)
3523 return -ENOMEM;
3524
3525 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3526 memcpy(skb_put(skb, 3), hw_err, 3);
3527
3528 /* Send Hardware Error to upper stack */
3529 return hci_recv_frame(hdev, skb);
3530 }
3531 EXPORT_SYMBOL(hci_reset_dev);
3532
3533 /* Receive frame from HCI drivers */
3534 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3535 {
3536 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3537 && !test_bit(HCI_INIT, &hdev->flags))) {
3538 kfree_skb(skb);
3539 return -ENXIO;
3540 }
3541
3542 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3543 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3544 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3545 kfree_skb(skb);
3546 return -EINVAL;
3547 }
3548
3549 /* Incoming skb */
3550 bt_cb(skb)->incoming = 1;
3551
3552 /* Time stamp */
3553 __net_timestamp(skb);
3554
3555 skb_queue_tail(&hdev->rx_q, skb);
3556 queue_work(hdev->workqueue, &hdev->rx_work);
3557
3558 return 0;
3559 }
3560 EXPORT_SYMBOL(hci_recv_frame);
3561
3562 /* Receive diagnostic message from HCI drivers */
3563 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3564 {
3565 /* Time stamp */
3566 __net_timestamp(skb);
3567
3568 /* Mark as diagnostic packet and send to monitor */
3569 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3570 hci_send_to_monitor(hdev, skb);
3571
3572 kfree_skb(skb);
3573 return 0;
3574 }
3575 EXPORT_SYMBOL(hci_recv_diag);
3576
3577 /* ---- Interface to upper protocols ---- */
3578
3579 int hci_register_cb(struct hci_cb *cb)
3580 {
3581 BT_DBG("%p name %s", cb, cb->name);
3582
3583 mutex_lock(&hci_cb_list_lock);
3584 list_add_tail(&cb->list, &hci_cb_list);
3585 mutex_unlock(&hci_cb_list_lock);
3586
3587 return 0;
3588 }
3589 EXPORT_SYMBOL(hci_register_cb);
3590
3591 int hci_unregister_cb(struct hci_cb *cb)
3592 {
3593 BT_DBG("%p name %s", cb, cb->name);
3594
3595 mutex_lock(&hci_cb_list_lock);
3596 list_del(&cb->list);
3597 mutex_unlock(&hci_cb_list_lock);
3598
3599 return 0;
3600 }
3601 EXPORT_SYMBOL(hci_unregister_cb);
3602
3603 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3604 {
3605 int err;
3606
3607 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3608
3609 /* Time stamp */
3610 __net_timestamp(skb);
3611
3612 /* Send copy to monitor */
3613 hci_send_to_monitor(hdev, skb);
3614
3615 if (atomic_read(&hdev->promisc)) {
3616 /* Send copy to the sockets */
3617 hci_send_to_sock(hdev, skb);
3618 }
3619
3620 /* Get rid of skb owner, prior to sending to the driver. */
3621 skb_orphan(skb);
3622
3623 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3624 kfree_skb(skb);
3625 return;
3626 }
3627
3628 err = hdev->send(hdev, skb);
3629 if (err < 0) {
3630 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3631 kfree_skb(skb);
3632 }
3633 }
3634
3635 /* Send HCI command */
3636 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3637 const void *param)
3638 {
3639 struct sk_buff *skb;
3640
3641 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3642
3643 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3644 if (!skb) {
3645 BT_ERR("%s no memory for command", hdev->name);
3646 return -ENOMEM;
3647 }
3648
3649 /* Stand-alone HCI commands must be flagged as
3650 * single-command requests.
3651 */
3652 bt_cb(skb)->req.start = true;
3653
3654 skb_queue_tail(&hdev->cmd_q, skb);
3655 queue_work(hdev->workqueue, &hdev->cmd_work);
3656
3657 return 0;
3658 }
3659
3660 /* Get data from the previously sent command */
3661 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3662 {
3663 struct hci_command_hdr *hdr;
3664
3665 if (!hdev->sent_cmd)
3666 return NULL;
3667
3668 hdr = (void *) hdev->sent_cmd->data;
3669
3670 if (hdr->opcode != cpu_to_le16(opcode))
3671 return NULL;
3672
3673 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3674
3675 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3676 }
3677
3678 /* Send HCI command and wait for Command Complete event */
3679 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3680 const void *param, u32 timeout)
3681 {
3682 struct sk_buff *skb;
3683
3684 if (!test_bit(HCI_UP, &hdev->flags))
3685 return ERR_PTR(-ENETDOWN);
3686
3687 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3688
3689 hci_req_lock(hdev);
3690 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3691 hci_req_unlock(hdev);
3692
3693 return skb;
3694 }
3695 EXPORT_SYMBOL(hci_cmd_sync);
3696
3697 /* Send ACL data */
3698 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3699 {
3700 struct hci_acl_hdr *hdr;
3701 int len = skb->len;
3702
3703 skb_push(skb, HCI_ACL_HDR_SIZE);
3704 skb_reset_transport_header(skb);
3705 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3706 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3707 hdr->dlen = cpu_to_le16(len);
3708 }
3709
3710 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3711 struct sk_buff *skb, __u16 flags)
3712 {
3713 struct hci_conn *conn = chan->conn;
3714 struct hci_dev *hdev = conn->hdev;
3715 struct sk_buff *list;
3716
3717 skb->len = skb_headlen(skb);
3718 skb->data_len = 0;
3719
3720 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3721
3722 switch (hdev->dev_type) {
3723 case HCI_BREDR:
3724 hci_add_acl_hdr(skb, conn->handle, flags);
3725 break;
3726 case HCI_AMP:
3727 hci_add_acl_hdr(skb, chan->handle, flags);
3728 break;
3729 default:
3730 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3731 return;
3732 }
3733
3734 list = skb_shinfo(skb)->frag_list;
3735 if (!list) {
3736 /* Non fragmented */
3737 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3738
3739 skb_queue_tail(queue, skb);
3740 } else {
3741 /* Fragmented */
3742 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3743
3744 skb_shinfo(skb)->frag_list = NULL;
3745
3746 /* Queue all fragments atomically. We need to use spin_lock_bh
3747 * here because of 6LoWPAN links, where this function can be
3748 * called from softirq context, and taking a plain spin lock
3749 * could cause deadlocks.
3750 */
3751 spin_lock_bh(&queue->lock);
3752
3753 __skb_queue_tail(queue, skb);
3754
3755 flags &= ~ACL_START;
3756 flags |= ACL_CONT;
3757 do {
3758 skb = list; list = list->next;
3759
3760 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3761 hci_add_acl_hdr(skb, conn->handle, flags);
3762
3763 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3764
3765 __skb_queue_tail(queue, skb);
3766 } while (list);
3767
3768 spin_unlock_bh(&queue->lock);
3769 }
3770 }
3771
3772 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3773 {
3774 struct hci_dev *hdev = chan->conn->hdev;
3775
3776 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3777
3778 hci_queue_acl(chan, &chan->data_q, skb, flags);
3779
3780 queue_work(hdev->workqueue, &hdev->tx_work);
3781 }
3782
3783 /* Send SCO data */
3784 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3785 {
3786 struct hci_dev *hdev = conn->hdev;
3787 struct hci_sco_hdr hdr;
3788
3789 BT_DBG("%s len %d", hdev->name, skb->len);
3790
3791 hdr.handle = cpu_to_le16(conn->handle);
3792 hdr.dlen = skb->len;
3793
3794 skb_push(skb, HCI_SCO_HDR_SIZE);
3795 skb_reset_transport_header(skb);
3796 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3797
3798 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3799
3800 skb_queue_tail(&conn->data_q, skb);
3801 queue_work(hdev->workqueue, &hdev->tx_work);
3802 }
3803
3804 /* ---- HCI TX task (outgoing data) ---- */
3805
3806 /* HCI Connection scheduler */
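/* Pick the connection of the given type with the least data in
 * flight among those that have packets queued, and grant it a share
 * of the controller's free buffers (at least one packet).
 */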
3807 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3808 int *quote)
3809 {
3810 struct hci_conn_hash *h = &hdev->conn_hash;
3811 struct hci_conn *conn = NULL, *c;
3812 unsigned int num = 0, min = ~0;
3813
3814 /* We don't have to lock device here. Connections are always
3815 * added and removed with TX task disabled. */
3816
3817 rcu_read_lock();
3818
3819 list_for_each_entry_rcu(c, &h->list, list) {
3820 if (c->type != type || skb_queue_empty(&c->data_q))
3821 continue;
3822
3823 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3824 continue;
3825
3826 num++;
3827
3828 if (c->sent < min) {
3829 min = c->sent;
3830 conn = c;
3831 }
3832
3833 if (hci_conn_num(hdev, type) == num)
3834 break;
3835 }
3836
3837 rcu_read_unlock();
3838
3839 if (conn) {
3840 int cnt, q;
3841
3842 switch (conn->type) {
3843 case ACL_LINK:
3844 cnt = hdev->acl_cnt;
3845 break;
3846 case SCO_LINK:
3847 case ESCO_LINK:
3848 cnt = hdev->sco_cnt;
3849 break;
3850 case LE_LINK:
3851 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3852 break;
3853 default:
3854 cnt = 0;
3855 BT_ERR("Unknown link type");
3856 }
3857
3858 q = cnt / num;
3859 *quote = q ? q : 1;
3860 } else
3861 *quote = 0;
3862
3863 BT_DBG("conn %p quote %d", conn, *quote);
3864 return conn;
3865 }
3866
3867 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3868 {
3869 struct hci_conn_hash *h = &hdev->conn_hash;
3870 struct hci_conn *c;
3871
3872 BT_ERR("%s link tx timeout", hdev->name);
3873
3874 rcu_read_lock();
3875
3876 /* Kill stalled connections */
3877 list_for_each_entry_rcu(c, &h->list, list) {
3878 if (c->type == type && c->sent) {
3879 BT_ERR("%s killing stalled connection %pMR",
3880 hdev->name, &c->dst);
3881 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3882 }
3883 }
3884
3885 rcu_read_unlock();
3886 }
3887
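/* Pick a channel of the given link type to service next: only the
 * highest priority among the queued head packets is considered, and
 * among those channels the one whose connection has the least data
 * in flight wins. The quote is a proportional share of the free
 * controller buffers, at least one packet.
 */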
3888 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3889 int *quote)
3890 {
3891 struct hci_conn_hash *h = &hdev->conn_hash;
3892 struct hci_chan *chan = NULL;
3893 unsigned int num = 0, min = ~0, cur_prio = 0;
3894 struct hci_conn *conn;
3895 int cnt, q, conn_num = 0;
3896
3897 BT_DBG("%s", hdev->name);
3898
3899 rcu_read_lock();
3900
3901 list_for_each_entry_rcu(conn, &h->list, list) {
3902 struct hci_chan *tmp;
3903
3904 if (conn->type != type)
3905 continue;
3906
3907 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3908 continue;
3909
3910 conn_num++;
3911
3912 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3913 struct sk_buff *skb;
3914
3915 if (skb_queue_empty(&tmp->data_q))
3916 continue;
3917
3918 skb = skb_peek(&tmp->data_q);
3919 if (skb->priority < cur_prio)
3920 continue;
3921
3922 if (skb->priority > cur_prio) {
3923 num = 0;
3924 min = ~0;
3925 cur_prio = skb->priority;
3926 }
3927
3928 num++;
3929
3930 if (conn->sent < min) {
3931 min = conn->sent;
3932 chan = tmp;
3933 }
3934 }
3935
3936 if (hci_conn_num(hdev, type) == conn_num)
3937 break;
3938 }
3939
3940 rcu_read_unlock();
3941
3942 if (!chan)
3943 return NULL;
3944
3945 switch (chan->conn->type) {
3946 case ACL_LINK:
3947 cnt = hdev->acl_cnt;
3948 break;
3949 case AMP_LINK:
3950 cnt = hdev->block_cnt;
3951 break;
3952 case SCO_LINK:
3953 case ESCO_LINK:
3954 cnt = hdev->sco_cnt;
3955 break;
3956 case LE_LINK:
3957 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3958 break;
3959 default:
3960 cnt = 0;
3961 BT_ERR("Unknown link type");
3962 }
3963
3964 q = cnt / num;
3965 *quote = q ? q : 1;
3966 BT_DBG("chan %p quote %d", chan, *quote);
3967 return chan;
3968 }
3969
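/* Avoid starvation: channels of this link type that were serviced in
 * the last round get their sent counter reset, while channels that
 * were skipped have their head packet promoted to priority
 * HCI_PRIO_MAX - 1.
 */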
3970 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3971 {
3972 struct hci_conn_hash *h = &hdev->conn_hash;
3973 struct hci_conn *conn;
3974 int num = 0;
3975
3976 BT_DBG("%s", hdev->name);
3977
3978 rcu_read_lock();
3979
3980 list_for_each_entry_rcu(conn, &h->list, list) {
3981 struct hci_chan *chan;
3982
3983 if (conn->type != type)
3984 continue;
3985
3986 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3987 continue;
3988
3989 num++;
3990
3991 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3992 struct sk_buff *skb;
3993
3994 if (chan->sent) {
3995 chan->sent = 0;
3996 continue;
3997 }
3998
3999 if (skb_queue_empty(&chan->data_q))
4000 continue;
4001
4002 skb = skb_peek(&chan->data_q);
4003 if (skb->priority >= HCI_PRIO_MAX - 1)
4004 continue;
4005
4006 skb->priority = HCI_PRIO_MAX - 1;
4007
4008 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4009 skb->priority);
4010 }
4011
4012 if (hci_conn_num(hdev, type) == num)
4013 break;
4014 }
4015
4016 rcu_read_unlock();
4017
4018 }
4019
4020 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4021 {
4022 /* Calculate count of blocks used by this packet */
4023 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4024 }
4025
4026 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4027 {
4028 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4029 /* ACL tx timeout must be longer than maximum
4030 * link supervision timeout (40.9 seconds) */
4031 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4032 HCI_ACL_TX_TIMEOUT))
4033 hci_link_tx_to(hdev, ACL_LINK);
4034 }
4035 }
4036
4037 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4038 {
4039 unsigned int cnt = hdev->acl_cnt;
4040 struct hci_chan *chan;
4041 struct sk_buff *skb;
4042 int quote;
4043
4044 __check_timeout(hdev, cnt);
4045
4046 while (hdev->acl_cnt &&
4047 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4048 u32 priority = (skb_peek(&chan->data_q))->priority;
4049 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4050 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4051 skb->len, skb->priority);
4052
4053 /* Stop if priority has changed */
4054 if (skb->priority < priority)
4055 break;
4056
4057 skb = skb_dequeue(&chan->data_q);
4058
4059 hci_conn_enter_active_mode(chan->conn,
4060 bt_cb(skb)->force_active);
4061
4062 hci_send_frame(hdev, skb);
4063 hdev->acl_last_tx = jiffies;
4064
4065 hdev->acl_cnt--;
4066 chan->sent++;
4067 chan->conn->sent++;
4068 }
4069 }
4070
4071 if (cnt != hdev->acl_cnt)
4072 hci_prio_recalculate(hdev, ACL_LINK);
4073 }
4074
4075 static void hci_sched_acl_blk(struct hci_dev *hdev)
4076 {
4077 unsigned int cnt = hdev->block_cnt;
4078 struct hci_chan *chan;
4079 struct sk_buff *skb;
4080 int quote;
4081 u8 type;
4082
4083 __check_timeout(hdev, cnt);
4084
4085 BT_DBG("%s", hdev->name);
4086
4087 if (hdev->dev_type == HCI_AMP)
4088 type = AMP_LINK;
4089 else
4090 type = ACL_LINK;
4091
4092 while (hdev->block_cnt > 0 &&
4093 (chan = hci_chan_sent(hdev, type, &quote))) {
4094 u32 priority = (skb_peek(&chan->data_q))->priority;
4095 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4096 int blocks;
4097
4098 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4099 skb->len, skb->priority);
4100
4101 /* Stop if priority has changed */
4102 if (skb->priority < priority)
4103 break;
4104
4105 skb = skb_dequeue(&chan->data_q);
4106
4107 blocks = __get_blocks(hdev, skb);
4108 if (blocks > hdev->block_cnt)
4109 return;
4110
4111 hci_conn_enter_active_mode(chan->conn,
4112 bt_cb(skb)->force_active);
4113
4114 hci_send_frame(hdev, skb);
4115 hdev->acl_last_tx = jiffies;
4116
4117 hdev->block_cnt -= blocks;
4118 quote -= blocks;
4119
4120 chan->sent += blocks;
4121 chan->conn->sent += blocks;
4122 }
4123 }
4124
4125 if (cnt != hdev->block_cnt)
4126 hci_prio_recalculate(hdev, type);
4127 }
4128
4129 static void hci_sched_acl(struct hci_dev *hdev)
4130 {
4131 BT_DBG("%s", hdev->name);
4132
4133 /* No ACL link over BR/EDR controller */
4134 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4135 return;
4136
4137 /* No AMP link over AMP controller */
4138 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4139 return;
4140
4141 switch (hdev->flow_ctl_mode) {
4142 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4143 hci_sched_acl_pkt(hdev);
4144 break;
4145
4146 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4147 hci_sched_acl_blk(hdev);
4148 break;
4149 }
4150 }
4151
4152 /* Schedule SCO */
4153 static void hci_sched_sco(struct hci_dev *hdev)
4154 {
4155 struct hci_conn *conn;
4156 struct sk_buff *skb;
4157 int quote;
4158
4159 BT_DBG("%s", hdev->name);
4160
4161 if (!hci_conn_num(hdev, SCO_LINK))
4162 return;
4163
4164 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4165 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4166 BT_DBG("skb %p len %d", skb, skb->len);
4167 hci_send_frame(hdev, skb);
4168
4169 conn->sent++;
4170 if (conn->sent == ~0)
4171 conn->sent = 0;
4172 }
4173 }
4174 }
4175
4176 static void hci_sched_esco(struct hci_dev *hdev)
4177 {
4178 struct hci_conn *conn;
4179 struct sk_buff *skb;
4180 int quote;
4181
4182 BT_DBG("%s", hdev->name);
4183
4184 if (!hci_conn_num(hdev, ESCO_LINK))
4185 return;
4186
4187 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4188 &quote))) {
4189 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4190 BT_DBG("skb %p len %d", skb, skb->len);
4191 hci_send_frame(hdev, skb);
4192
4193 conn->sent++;
4194 if (conn->sent == ~0)
4195 conn->sent = 0;
4196 }
4197 }
4198 }
4199
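/* Schedule LE.  Controllers with a dedicated LE buffer pool
 * (hdev->le_pkts) are accounted against hdev->le_cnt; otherwise LE
 * traffic shares the ACL buffers and hdev->acl_cnt is charged
 * instead.
 */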
4200 static void hci_sched_le(struct hci_dev *hdev)
4201 {
4202 struct hci_chan *chan;
4203 struct sk_buff *skb;
4204 int quote, cnt, tmp;
4205
4206 BT_DBG("%s", hdev->name);
4207
4208 if (!hci_conn_num(hdev, LE_LINK))
4209 return;
4210
4211 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4212 		/* LE tx timeout must be longer than the maximum
4213 		 * link supervision timeout (40.9 seconds) */
4214 if (!hdev->le_cnt && hdev->le_pkts &&
4215 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4216 hci_link_tx_to(hdev, LE_LINK);
4217 }
4218
4219 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4220 tmp = cnt;
4221 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4222 u32 priority = (skb_peek(&chan->data_q))->priority;
4223 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4224 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4225 skb->len, skb->priority);
4226
4227 /* Stop if priority has changed */
4228 if (skb->priority < priority)
4229 break;
4230
4231 skb = skb_dequeue(&chan->data_q);
4232
4233 hci_send_frame(hdev, skb);
4234 hdev->le_last_tx = jiffies;
4235
4236 cnt--;
4237 chan->sent++;
4238 chan->conn->sent++;
4239 }
4240 }
4241
4242 if (hdev->le_pkts)
4243 hdev->le_cnt = cnt;
4244 else
4245 hdev->acl_cnt = cnt;
4246
4247 if (cnt != tmp)
4248 hci_prio_recalculate(hdev, LE_LINK);
4249 }
4250
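/* TX work: entry point of the transmit scheduler.  Runs the ACL,
 * SCO, eSCO and LE schedulers unless the device is in user channel
 * mode, then flushes any queued raw packets.
 */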
4251 static void hci_tx_work(struct work_struct *work)
4252 {
4253 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4254 struct sk_buff *skb;
4255
4256 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4257 hdev->sco_cnt, hdev->le_cnt);
4258
4259 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4260 		/* Schedule queues and send pending frames to the HCI driver */
4261 hci_sched_acl(hdev);
4262 hci_sched_sco(hdev);
4263 hci_sched_esco(hdev);
4264 hci_sched_le(hdev);
4265 }
4266
4267 	/* Send any queued raw (unknown type) packets */
4268 while ((skb = skb_dequeue(&hdev->raw_q)))
4269 hci_send_frame(hdev, skb);
4270 }
4271
4272 /* ----- HCI RX task (incoming data processing) ----- */
4273
4274 /* ACL data packet */
4275 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4276 {
4277 struct hci_acl_hdr *hdr = (void *) skb->data;
4278 struct hci_conn *conn;
4279 __u16 handle, flags;
4280
4281 skb_pull(skb, HCI_ACL_HDR_SIZE);
4282
4283 handle = __le16_to_cpu(hdr->handle);
4284 flags = hci_flags(handle);
4285 handle = hci_handle(handle);
4286
4287 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4288 handle, flags);
4289
4290 hdev->stat.acl_rx++;
4291
4292 hci_dev_lock(hdev);
4293 conn = hci_conn_hash_lookup_handle(hdev, handle);
4294 hci_dev_unlock(hdev);
4295
4296 if (conn) {
4297 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4298
4299 /* Send to upper protocol */
4300 l2cap_recv_acldata(conn, skb, flags);
4301 return;
4302 } else {
4303 BT_ERR("%s ACL packet for unknown connection handle %d",
4304 hdev->name, handle);
4305 }
4306
4307 kfree_skb(skb);
4308 }
4309
4310 /* SCO data packet */
4311 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4312 {
4313 struct hci_sco_hdr *hdr = (void *) skb->data;
4314 struct hci_conn *conn;
4315 __u16 handle;
4316
4317 skb_pull(skb, HCI_SCO_HDR_SIZE);
4318
4319 handle = __le16_to_cpu(hdr->handle);
4320
4321 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4322
4323 hdev->stat.sco_rx++;
4324
4325 hci_dev_lock(hdev);
4326 conn = hci_conn_hash_lookup_handle(hdev, handle);
4327 hci_dev_unlock(hdev);
4328
4329 if (conn) {
4330 /* Send to upper protocol */
4331 sco_recv_scodata(conn, skb);
4332 return;
4333 } else {
4334 BT_ERR("%s SCO packet for unknown connection handle %d",
4335 hdev->name, handle);
4336 }
4337
4338 kfree_skb(skb);
4339 }
4340
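/* The current request is complete when the next command in
 * hdev->cmd_q starts a new request, or when the queue is empty.
 */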
4341 static bool hci_req_is_complete(struct hci_dev *hdev)
4342 {
4343 struct sk_buff *skb;
4344
4345 skb = skb_peek(&hdev->cmd_q);
4346 if (!skb)
4347 return true;
4348
4349 return bt_cb(skb)->req.start;
4350 }
4351
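/* Re-queue a clone of the last sent command (unless it was
 * HCI_Reset itself) and kick the command work.  Used when a
 * controller emits a spontaneous reset complete event during init
 * and the pending command would otherwise never be completed.
 */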
4352 static void hci_resend_last(struct hci_dev *hdev)
4353 {
4354 struct hci_command_hdr *sent;
4355 struct sk_buff *skb;
4356 u16 opcode;
4357
4358 if (!hdev->sent_cmd)
4359 return;
4360
4361 sent = (void *) hdev->sent_cmd->data;
4362 opcode = __le16_to_cpu(sent->opcode);
4363 if (opcode == HCI_OP_RESET)
4364 return;
4365
4366 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4367 if (!skb)
4368 return;
4369
4370 skb_queue_head(&hdev->cmd_q, skb);
4371 queue_work(hdev->workqueue, &hdev->cmd_work);
4372 }
4373
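/* Resolve the completion callback for the request that the completed
 * command belongs to, handing it back through req_complete or
 * req_complete_skb once the request has finished.
 */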
4374 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4375 hci_req_complete_t *req_complete,
4376 hci_req_complete_skb_t *req_complete_skb)
4377 {
4378 struct sk_buff *skb;
4379 unsigned long flags;
4380
4381 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4382
4383 	/* If the completed command doesn't match the last one that was
4384 	 * sent, we need to do special handling of it.
4385 	 */
4386 if (!hci_sent_cmd_data(hdev, opcode)) {
4387 /* Some CSR based controllers generate a spontaneous
4388 * reset complete event during init and any pending
4389 * command will never be completed. In such a case we
4390 * need to resend whatever was the last sent
4391 * command.
4392 */
4393 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4394 hci_resend_last(hdev);
4395
4396 return;
4397 }
4398
4399 	/* If the command succeeded and there are still more commands in
4400 	 * this request, the request is not yet complete.
4401 	 */
4402 if (!status && !hci_req_is_complete(hdev))
4403 return;
4404
4405 	/* If this was the last command in a request, the complete
4406 	 * callback would be found in hdev->sent_cmd instead of the
4407 	 * command queue (hdev->cmd_q).
4408 	 */
4409 if (bt_cb(hdev->sent_cmd)->req.complete) {
4410 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4411 return;
4412 }
4413
4414 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4415 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4416 return;
4417 }
4418
4419 /* Remove all pending commands belonging to this request */
4420 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4421 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4422 if (bt_cb(skb)->req.start) {
4423 __skb_queue_head(&hdev->cmd_q, skb);
4424 break;
4425 }
4426
4427 *req_complete = bt_cb(skb)->req.complete;
4428 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4429 kfree_skb(skb);
4430 }
4431 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4432 }
4433
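/* RX work: drains hdev->rx_q.  Every frame is copied to the monitor
 * socket, and to HCI sockets while the device is in promiscuous
 * mode.  Frames are not processed locally in user channel mode, data
 * packets are dropped while HCI_INIT is set, and everything else is
 * dispatched to the event, ACL data or SCO data handlers.
 */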
4434 static void hci_rx_work(struct work_struct *work)
4435 {
4436 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4437 struct sk_buff *skb;
4438
4439 BT_DBG("%s", hdev->name);
4440
4441 while ((skb = skb_dequeue(&hdev->rx_q))) {
4442 /* Send copy to monitor */
4443 hci_send_to_monitor(hdev, skb);
4444
4445 if (atomic_read(&hdev->promisc)) {
4446 /* Send copy to the sockets */
4447 hci_send_to_sock(hdev, skb);
4448 }
4449
4450 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4451 kfree_skb(skb);
4452 continue;
4453 }
4454
4455 if (test_bit(HCI_INIT, &hdev->flags)) {
4456 			/* Don't process data packets in this state. */
4457 switch (bt_cb(skb)->pkt_type) {
4458 case HCI_ACLDATA_PKT:
4459 case HCI_SCODATA_PKT:
4460 kfree_skb(skb);
4461 continue;
4462 }
4463 }
4464
4465 /* Process frame */
4466 switch (bt_cb(skb)->pkt_type) {
4467 case HCI_EVENT_PKT:
4468 BT_DBG("%s Event packet", hdev->name);
4469 hci_event_packet(hdev, skb);
4470 break;
4471
4472 case HCI_ACLDATA_PKT:
4473 BT_DBG("%s ACL data packet", hdev->name);
4474 hci_acldata_packet(hdev, skb);
4475 break;
4476
4477 case HCI_SCODATA_PKT:
4478 BT_DBG("%s SCO data packet", hdev->name);
4479 hci_scodata_packet(hdev, skb);
4480 break;
4481
4482 default:
4483 kfree_skb(skb);
4484 break;
4485 }
4486 }
4487 }
4488
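/* Command work: when the controller has a command credit
 * (hdev->cmd_cnt), send the next queued command, keep a clone of it
 * in hdev->sent_cmd and (re)arm the command timeout unless a reset
 * is in progress.  If the clone cannot be allocated, the command is
 * put back on the queue and the work is rescheduled.
 */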
4489 static void hci_cmd_work(struct work_struct *work)
4490 {
4491 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4492 struct sk_buff *skb;
4493
4494 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4495 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4496
4497 /* Send queued commands */
4498 if (atomic_read(&hdev->cmd_cnt)) {
4499 skb = skb_dequeue(&hdev->cmd_q);
4500 if (!skb)
4501 return;
4502
4503 kfree_skb(hdev->sent_cmd);
4504
4505 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4506 if (hdev->sent_cmd) {
4507 atomic_dec(&hdev->cmd_cnt);
4508 hci_send_frame(hdev, skb);
4509 if (test_bit(HCI_RESET, &hdev->flags))
4510 cancel_delayed_work(&hdev->cmd_timer);
4511 else
4512 schedule_delayed_work(&hdev->cmd_timer,
4513 HCI_CMD_TIMEOUT);
4514 } else {
4515 skb_queue_head(&hdev->cmd_q, skb);
4516 queue_work(hdev->workqueue, &hdev->cmd_work);
4517 }
4518 }
4519 }