]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Track number of added devices with HCI_AUTO_CONN_REPORT
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
970c4e46
JH
40#include "smp.h"
41
b78752cc 42static void hci_rx_work(struct work_struct *work);
c347b765 43static void hci_cmd_work(struct work_struct *work);
3eff45ea 44static void hci_tx_work(struct work_struct *work);
1da177e4 45
1da177e4
LT
46/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
3df92b31
SL
54/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
1da177e4
LT
57/* ---- HCI notifications ---- */
58
/* Notify the HCI socket layer of a device event so that monitor and
 * control sockets learn about device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
63
baf27f6e
MH
64/* ---- HCI debugfs entries ---- */
65
4b4148e9
MH
66static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68{
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
111902f7 72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
4b4148e9
MH
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76}
77
78static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80{
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 char buf[32];
84 size_t buf_size = min(count, (sizeof(buf)-1));
85 bool enable;
86 int err;
87
88 if (!test_bit(HCI_UP, &hdev->flags))
89 return -ENETDOWN;
90
91 if (copy_from_user(buf, user_buf, buf_size))
92 return -EFAULT;
93
94 buf[buf_size] = '\0';
95 if (strtobool(buf, &enable))
96 return -EINVAL;
97
111902f7 98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
4b4148e9
MH
99 return -EALREADY;
100
101 hci_req_lock(hdev);
102 if (enable)
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 HCI_CMD_TIMEOUT);
105 else
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 hci_req_unlock(hdev);
109
110 if (IS_ERR(skb))
111 return PTR_ERR(skb);
112
113 err = -bt_to_errno(skb->data[0]);
114 kfree_skb(skb);
115
116 if (err < 0)
117 return err;
118
111902f7 119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
4b4148e9
MH
120
121 return count;
122}
123
124static const struct file_operations dut_mode_fops = {
125 .open = simple_open,
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
129};
130
dfb826a8
MH
131static int features_show(struct seq_file *f, void *ptr)
132{
133 struct hci_dev *hdev = f->private;
134 u8 p;
135
136 hci_dev_lock(hdev);
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
144 }
cfbb2b5b
MH
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
152 hci_dev_unlock(hdev);
153
154 return 0;
155}
156
157static int features_open(struct inode *inode, struct file *file)
158{
159 return single_open(file, features_show, inode->i_private);
160}
161
162static const struct file_operations features_fops = {
163 .open = features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167};
168
70afe0b8
MH
169static int blacklist_show(struct seq_file *f, void *p)
170{
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
173
174 hci_dev_lock(hdev);
175 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
177 hci_dev_unlock(hdev);
178
179 return 0;
180}
181
182static int blacklist_open(struct inode *inode, struct file *file)
183{
184 return single_open(file, blacklist_show, inode->i_private);
185}
186
187static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
189 .read = seq_read,
190 .llseek = seq_lseek,
191 .release = single_release,
192};
193
47219839
MH
194static int uuids_show(struct seq_file *f, void *p)
195{
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
198
199 hci_dev_lock(hdev);
200 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
201 u8 i, val[16];
202
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
206 */
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
209
210 seq_printf(f, "%pUb\n", val);
47219839
MH
211 }
212 hci_dev_unlock(hdev);
213
214 return 0;
215}
216
217static int uuids_open(struct inode *inode, struct file *file)
218{
219 return single_open(file, uuids_show, inode->i_private);
220}
221
222static const struct file_operations uuids_fops = {
223 .open = uuids_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = single_release,
227};
228
baf27f6e
MH
229static int inquiry_cache_show(struct seq_file *f, void *p)
230{
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
234
235 hci_dev_lock(hdev);
236
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 &data->bdaddr,
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
246 }
247
248 hci_dev_unlock(hdev);
249
250 return 0;
251}
252
253static int inquiry_cache_open(struct inode *inode, struct file *file)
254{
255 return single_open(file, inquiry_cache_show, inode->i_private);
256}
257
258static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
260 .read = seq_read,
261 .llseek = seq_lseek,
262 .release = single_release,
263};
264
02d08d15
MH
265static int link_keys_show(struct seq_file *f, void *ptr)
266{
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
269
270 hci_dev_lock(hdev);
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 }
276 hci_dev_unlock(hdev);
277
278 return 0;
279}
280
281static int link_keys_open(struct inode *inode, struct file *file)
282{
283 return single_open(file, link_keys_show, inode->i_private);
284}
285
286static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
288 .read = seq_read,
289 .llseek = seq_lseek,
290 .release = single_release,
291};
292
babdbb3c
MH
293static int dev_class_show(struct seq_file *f, void *ptr)
294{
295 struct hci_dev *hdev = f->private;
296
297 hci_dev_lock(hdev);
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
301
302 return 0;
303}
304
305static int dev_class_open(struct inode *inode, struct file *file)
306{
307 return single_open(file, dev_class_show, inode->i_private);
308}
309
310static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
312 .read = seq_read,
313 .llseek = seq_lseek,
314 .release = single_release,
315};
316
041000b9
MH
317static int voice_setting_get(void *data, u64 *val)
318{
319 struct hci_dev *hdev = data;
320
321 hci_dev_lock(hdev);
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
324
325 return 0;
326}
327
328DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
330
ebd1e33b
MH
331static int auto_accept_delay_set(void *data, u64 val)
332{
333 struct hci_dev *hdev = data;
334
335 hci_dev_lock(hdev);
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
338
339 return 0;
340}
341
342static int auto_accept_delay_get(void *data, u64 *val)
343{
344 struct hci_dev *hdev = data;
345
346 hci_dev_lock(hdev);
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
349
350 return 0;
351}
352
353DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
355
5afeac14
MH
356static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
358{
359 struct hci_dev *hdev = file->private_data;
360 char buf[3];
361
111902f7 362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
5afeac14
MH
363 buf[1] = '\n';
364 buf[2] = '\0';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
366}
367
368static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
371{
372 struct hci_dev *hdev = file->private_data;
373 char buf[32];
374 size_t buf_size = min(count, (sizeof(buf)-1));
375 bool enable;
376
377 if (test_bit(HCI_UP, &hdev->flags))
378 return -EBUSY;
379
380 if (copy_from_user(buf, user_buf, buf_size))
381 return -EFAULT;
382
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
385 return -EINVAL;
386
111902f7 387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
5afeac14
MH
388 return -EALREADY;
389
111902f7 390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
5afeac14
MH
391
392 return count;
393}
394
395static const struct file_operations force_sc_support_fops = {
396 .open = simple_open,
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
400};
401
134c2a89
MH
402static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
404{
405 struct hci_dev *hdev = file->private_data;
406 char buf[3];
407
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
409 buf[1] = '\n';
410 buf[2] = '\0';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
412}
413
414static const struct file_operations sc_only_mode_fops = {
415 .open = simple_open,
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
418};
419
2bfa3531
MH
420static int idle_timeout_set(void *data, u64 val)
421{
422 struct hci_dev *hdev = data;
423
424 if (val != 0 && (val < 500 || val > 3600000))
425 return -EINVAL;
426
427 hci_dev_lock(hdev);
2be48b65 428 hdev->idle_timeout = val;
2bfa3531
MH
429 hci_dev_unlock(hdev);
430
431 return 0;
432}
433
434static int idle_timeout_get(void *data, u64 *val)
435{
436 struct hci_dev *hdev = data;
437
438 hci_dev_lock(hdev);
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
441
442 return 0;
443}
444
445DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
447
c982b2ea
JH
448static int rpa_timeout_set(void *data, u64 val)
449{
450 struct hci_dev *hdev = data;
451
452 /* Require the RPA timeout to be at least 30 seconds and at most
453 * 24 hours.
454 */
455 if (val < 30 || val > (60 * 60 * 24))
456 return -EINVAL;
457
458 hci_dev_lock(hdev);
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
461
462 return 0;
463}
464
465static int rpa_timeout_get(void *data, u64 *val)
466{
467 struct hci_dev *hdev = data;
468
469 hci_dev_lock(hdev);
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
472
473 return 0;
474}
475
476DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
478
2bfa3531
MH
479static int sniff_min_interval_set(void *data, u64 val)
480{
481 struct hci_dev *hdev = data;
482
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484 return -EINVAL;
485
486 hci_dev_lock(hdev);
2be48b65 487 hdev->sniff_min_interval = val;
2bfa3531
MH
488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493static int sniff_min_interval_get(void *data, u64 *val)
494{
495 struct hci_dev *hdev = data;
496
497 hci_dev_lock(hdev);
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
500
501 return 0;
502}
503
504DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
506
507static int sniff_max_interval_set(void *data, u64 val)
508{
509 struct hci_dev *hdev = data;
510
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512 return -EINVAL;
513
514 hci_dev_lock(hdev);
2be48b65 515 hdev->sniff_max_interval = val;
2bfa3531
MH
516 hci_dev_unlock(hdev);
517
518 return 0;
519}
520
521static int sniff_max_interval_get(void *data, u64 *val)
522{
523 struct hci_dev *hdev = data;
524
525 hci_dev_lock(hdev);
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
528
529 return 0;
530}
531
532DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
534
31ad1691
AK
535static int conn_info_min_age_set(void *data, u64 val)
536{
537 struct hci_dev *hdev = data;
538
539 if (val == 0 || val > hdev->conn_info_max_age)
540 return -EINVAL;
541
542 hci_dev_lock(hdev);
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
545
546 return 0;
547}
548
549static int conn_info_min_age_get(void *data, u64 *val)
550{
551 struct hci_dev *hdev = data;
552
553 hci_dev_lock(hdev);
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
556
557 return 0;
558}
559
560DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
562
563static int conn_info_max_age_set(void *data, u64 val)
564{
565 struct hci_dev *hdev = data;
566
567 if (val == 0 || val < hdev->conn_info_min_age)
568 return -EINVAL;
569
570 hci_dev_lock(hdev);
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
573
574 return 0;
575}
576
577static int conn_info_max_age_get(void *data, u64 *val)
578{
579 struct hci_dev *hdev = data;
580
581 hci_dev_lock(hdev);
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
584
585 return 0;
586}
587
588DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
590
ac345813
MH
591static int identity_show(struct seq_file *f, void *p)
592{
593 struct hci_dev *hdev = f->private;
a1f4c318 594 bdaddr_t addr;
ac345813
MH
595 u8 addr_type;
596
597 hci_dev_lock(hdev);
598
a1f4c318 599 hci_copy_identity_address(hdev, &addr, &addr_type);
ac345813 600
a1f4c318 601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
473deef2 602 16, hdev->irk, &hdev->rpa);
ac345813
MH
603
604 hci_dev_unlock(hdev);
605
606 return 0;
607}
608
609static int identity_open(struct inode *inode, struct file *file)
610{
611 return single_open(file, identity_show, inode->i_private);
612}
613
614static const struct file_operations identity_fops = {
615 .open = identity_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
619};
620
7a4cd51d
MH
621static int random_address_show(struct seq_file *f, void *p)
622{
623 struct hci_dev *hdev = f->private;
624
625 hci_dev_lock(hdev);
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
628
629 return 0;
630}
631
632static int random_address_open(struct inode *inode, struct file *file)
633{
634 return single_open(file, random_address_show, inode->i_private);
635}
636
637static const struct file_operations random_address_fops = {
638 .open = random_address_open,
639 .read = seq_read,
640 .llseek = seq_lseek,
641 .release = single_release,
642};
643
e7b8fc92
MH
644static int static_address_show(struct seq_file *f, void *p)
645{
646 struct hci_dev *hdev = f->private;
647
648 hci_dev_lock(hdev);
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
651
652 return 0;
653}
654
655static int static_address_open(struct inode *inode, struct file *file)
656{
657 return single_open(file, static_address_show, inode->i_private);
658}
659
660static const struct file_operations static_address_fops = {
661 .open = static_address_open,
662 .read = seq_read,
663 .llseek = seq_lseek,
664 .release = single_release,
665};
666
b32bba6c
MH
667static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
92202185 670{
b32bba6c
MH
671 struct hci_dev *hdev = file->private_data;
672 char buf[3];
92202185 673
111902f7 674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
b32bba6c
MH
675 buf[1] = '\n';
676 buf[2] = '\0';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
678}
679
b32bba6c
MH
680static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
92202185 683{
b32bba6c
MH
684 struct hci_dev *hdev = file->private_data;
685 char buf[32];
686 size_t buf_size = min(count, (sizeof(buf)-1));
687 bool enable;
92202185 688
b32bba6c
MH
689 if (test_bit(HCI_UP, &hdev->flags))
690 return -EBUSY;
92202185 691
b32bba6c
MH
692 if (copy_from_user(buf, user_buf, buf_size))
693 return -EFAULT;
694
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
697 return -EINVAL;
698
111902f7 699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
b32bba6c
MH
700 return -EALREADY;
701
111902f7 702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
b32bba6c
MH
703
704 return count;
92202185
MH
705}
706
b32bba6c
MH
707static const struct file_operations force_static_address_fops = {
708 .open = simple_open,
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
712};
92202185 713
d2ab0ac1
MH
714static int white_list_show(struct seq_file *f, void *ptr)
715{
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
718
719 hci_dev_lock(hdev);
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
723
724 return 0;
725}
726
727static int white_list_open(struct inode *inode, struct file *file)
728{
729 return single_open(file, white_list_show, inode->i_private);
730}
731
732static const struct file_operations white_list_fops = {
733 .open = white_list_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
737};
738
3698d704
MH
739static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740{
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
743
744 hci_dev_lock(hdev);
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
750 }
751 hci_dev_unlock(hdev);
752
753 return 0;
754}
755
756static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757{
758 return single_open(file, identity_resolving_keys_show,
759 inode->i_private);
760}
761
762static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
764 .read = seq_read,
765 .llseek = seq_lseek,
766 .release = single_release,
767};
768
8f8625cd
MH
769static int long_term_keys_show(struct seq_file *f, void *ptr)
770{
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
773
774 hci_dev_lock(hdev);
f813f1be 775 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
fe39c7b2 777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
8f8625cd
MH
778 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
fe39c7b2 780 __le64_to_cpu(ltk->rand), 16, ltk->val);
8f8625cd
MH
781 }
782 hci_dev_unlock(hdev);
783
784 return 0;
785}
786
787static int long_term_keys_open(struct inode *inode, struct file *file)
788{
789 return single_open(file, long_term_keys_show, inode->i_private);
790}
791
792static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
794 .read = seq_read,
795 .llseek = seq_lseek,
796 .release = single_release,
797};
798
4e70c7e7
MH
799static int conn_min_interval_set(void *data, u64 val)
800{
801 struct hci_dev *hdev = data;
802
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804 return -EINVAL;
805
806 hci_dev_lock(hdev);
2be48b65 807 hdev->le_conn_min_interval = val;
4e70c7e7
MH
808 hci_dev_unlock(hdev);
809
810 return 0;
811}
812
813static int conn_min_interval_get(void *data, u64 *val)
814{
815 struct hci_dev *hdev = data;
816
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
820
821 return 0;
822}
823
824DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
826
827static int conn_max_interval_set(void *data, u64 val)
828{
829 struct hci_dev *hdev = data;
830
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832 return -EINVAL;
833
834 hci_dev_lock(hdev);
2be48b65 835 hdev->le_conn_max_interval = val;
4e70c7e7
MH
836 hci_dev_unlock(hdev);
837
838 return 0;
839}
840
841static int conn_max_interval_get(void *data, u64 *val)
842{
843 struct hci_dev *hdev = data;
844
845 hci_dev_lock(hdev);
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
848
849 return 0;
850}
851
852DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
854
816a93d1
MH
855static int conn_latency_set(void *data, u64 val)
856{
857 struct hci_dev *hdev = data;
858
859 if (val > 0x01f3)
860 return -EINVAL;
861
862 hci_dev_lock(hdev);
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
865
866 return 0;
867}
868
869static int conn_latency_get(void *data, u64 *val)
870{
871 struct hci_dev *hdev = data;
872
873 hci_dev_lock(hdev);
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
876
877 return 0;
878}
879
880DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
882
f1649577
MH
883static int supervision_timeout_set(void *data, u64 val)
884{
885 struct hci_dev *hdev = data;
886
887 if (val < 0x000a || val > 0x0c80)
888 return -EINVAL;
889
890 hci_dev_lock(hdev);
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
893
894 return 0;
895}
896
897static int supervision_timeout_get(void *data, u64 *val)
898{
899 struct hci_dev *hdev = data;
900
901 hci_dev_lock(hdev);
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
904
905 return 0;
906}
907
908DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
910
3f959d46
MH
911static int adv_channel_map_set(void *data, u64 val)
912{
913 struct hci_dev *hdev = data;
914
915 if (val < 0x01 || val > 0x07)
916 return -EINVAL;
917
918 hci_dev_lock(hdev);
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
921
922 return 0;
923}
924
925static int adv_channel_map_get(void *data, u64 *val)
926{
927 struct hci_dev *hdev = data;
928
929 hci_dev_lock(hdev);
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
932
933 return 0;
934}
935
936DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
938
0b3c7d37 939static int device_list_show(struct seq_file *f, void *ptr)
7d474e06 940{
0b3c7d37 941 struct hci_dev *hdev = f->private;
7d474e06
AG
942 struct hci_conn_params *p;
943
944 hci_dev_lock(hdev);
7d474e06 945 list_for_each_entry(p, &hdev->le_conn_params, list) {
0b3c7d37 946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
7d474e06
AG
947 p->auto_connect);
948 }
7d474e06
AG
949 hci_dev_unlock(hdev);
950
951 return 0;
952}
953
0b3c7d37 954static int device_list_open(struct inode *inode, struct file *file)
7d474e06 955{
0b3c7d37 956 return single_open(file, device_list_show, inode->i_private);
7d474e06
AG
957}
958
0b3c7d37
MH
959static const struct file_operations device_list_fops = {
960 .open = device_list_open,
7d474e06 961 .read = seq_read,
7d474e06
AG
962 .llseek = seq_lseek,
963 .release = single_release,
964};
965
1da177e4
LT
966/* ---- HCI requests ---- */
967
42c6b129 968static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 969{
42c6b129 970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
971
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
976 }
977}
978
979static void hci_req_cancel(struct hci_dev *hdev, int err)
980{
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
982
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
987 }
988}
989
77a63e0a
FW
990static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
991 u8 event)
75e84b7c
JH
992{
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
995 struct sk_buff *skb;
996
997 hci_dev_lock(hdev);
998
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1001
1002 hci_dev_unlock(hdev);
1003
1004 if (!skb)
1005 return ERR_PTR(-ENODATA);
1006
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1009 goto failed;
1010 }
1011
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1014
7b1abbbe
JH
1015 if (event) {
1016 if (hdr->evt != event)
1017 goto failed;
1018 return skb;
1019 }
1020
75e84b7c
JH
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1023 goto failed;
1024 }
1025
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1028 goto failed;
1029 }
1030
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1033
1034 if (opcode == __le16_to_cpu(ev->opcode))
1035 return skb;
1036
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1039
1040failed:
1041 kfree_skb(skb);
1042 return ERR_PTR(-ENODATA);
1043}
1044
7b1abbbe 1045struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1046 const void *param, u8 event, u32 timeout)
75e84b7c
JH
1047{
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1050 int err = 0;
1051
1052 BT_DBG("%s", hdev->name);
1053
1054 hci_req_init(&req, hdev);
1055
7b1abbbe 1056 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
1057
1058 hdev->req_status = HCI_REQ_PEND;
1059
1060 err = hci_req_run(&req, hci_req_sync_complete);
1061 if (err < 0)
1062 return ERR_PTR(err);
1063
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1066
1067 schedule_timeout(timeout);
1068
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1070
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1073
1074 switch (hdev->req_status) {
1075 case HCI_REQ_DONE:
1076 err = -bt_to_errno(hdev->req_result);
1077 break;
1078
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1081 break;
1082
1083 default:
1084 err = -ETIMEDOUT;
1085 break;
1086 }
1087
1088 hdev->req_status = hdev->req_result = 0;
1089
1090 BT_DBG("%s end: err %d", hdev->name, err);
1091
1092 if (err < 0)
1093 return ERR_PTR(err);
1094
7b1abbbe
JH
1095 return hci_get_cmd_complete(hdev, opcode, event);
1096}
1097EXPORT_SYMBOL(__hci_cmd_sync_ev);
1098
1099struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1100 const void *param, u32 timeout)
7b1abbbe
JH
1101{
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
1103}
1104EXPORT_SYMBOL(__hci_cmd_sync);
1105
1da177e4 1106/* Execute request and wait for completion. */
01178cd4 1107static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1108 void (*func)(struct hci_request *req,
1109 unsigned long opt),
01178cd4 1110 unsigned long opt, __u32 timeout)
1da177e4 1111{
42c6b129 1112 struct hci_request req;
1da177e4
LT
1113 DECLARE_WAITQUEUE(wait, current);
1114 int err = 0;
1115
1116 BT_DBG("%s start", hdev->name);
1117
42c6b129
JH
1118 hci_req_init(&req, hdev);
1119
1da177e4
LT
1120 hdev->req_status = HCI_REQ_PEND;
1121
42c6b129 1122 func(&req, opt);
53cce22d 1123
42c6b129
JH
1124 err = hci_req_run(&req, hci_req_sync_complete);
1125 if (err < 0) {
53cce22d 1126 hdev->req_status = 0;
920c8300
AG
1127
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
42c6b129 1132 */
920c8300
AG
1133 if (err == -ENODATA)
1134 return 0;
1135
1136 return err;
53cce22d
JH
1137 }
1138
bc4445c7
AG
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1141
1da177e4
LT
1142 schedule_timeout(timeout);
1143
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1145
1146 if (signal_pending(current))
1147 return -EINTR;
1148
1149 switch (hdev->req_status) {
1150 case HCI_REQ_DONE:
e175072f 1151 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
1152 break;
1153
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1156 break;
1157
1158 default:
1159 err = -ETIMEDOUT;
1160 break;
3ff50b79 1161 }
1da177e4 1162
a5040efa 1163 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
1164
1165 BT_DBG("%s end: err %d", hdev->name, err);
1166
1167 return err;
1168}
1169
01178cd4 1170static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1171 void (*req)(struct hci_request *req,
1172 unsigned long opt),
01178cd4 1173 unsigned long opt, __u32 timeout)
1da177e4
LT
1174{
1175 int ret;
1176
7c6a329e
MH
1177 if (!test_bit(HCI_UP, &hdev->flags))
1178 return -ENETDOWN;
1179
1da177e4
LT
1180 /* Serialize all requests */
1181 hci_req_lock(hdev);
01178cd4 1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1183 hci_req_unlock(hdev);
1184
1185 return ret;
1186}
1187
42c6b129 1188static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 1189{
42c6b129 1190 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
1191
1192 /* Reset device */
42c6b129
JH
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
1195}
1196
42c6b129 1197static void bredr_init(struct hci_request *req)
1da177e4 1198{
42c6b129 1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 1200
1da177e4 1201 /* Read Local Supported Features */
42c6b129 1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 1203
1143e5a6 1204 /* Read Local Version */
42c6b129 1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
1206
1207 /* Read BD Address */
42c6b129 1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
1209}
1210
42c6b129 1211static void amp_init(struct hci_request *req)
e61ef499 1212{
42c6b129 1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 1214
e61ef499 1215 /* Read Local Version */
42c6b129 1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 1217
f6996cfe
MH
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1220
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1223
6bcbc489 1224 /* Read Local AMP Info */
42c6b129 1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
1226
1227 /* Read Data Blk size */
42c6b129 1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 1229
f38ba941
MH
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1232
7528ca1c
MH
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
1235}
1236
42c6b129 1237static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 1238{
42c6b129 1239 struct hci_dev *hdev = req->hdev;
e61ef499
AE
1240
1241 BT_DBG("%s %ld", hdev->name, opt);
1242
11778716
AE
1243 /* Reset */
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 1245 hci_reset_req(req, 0);
11778716 1246
e61ef499
AE
1247 switch (hdev->dev_type) {
1248 case HCI_BREDR:
42c6b129 1249 bredr_init(req);
e61ef499
AE
1250 break;
1251
1252 case HCI_AMP:
42c6b129 1253 amp_init(req);
e61ef499
AE
1254 break;
1255
1256 default:
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1258 break;
1259 }
e61ef499
AE
1260}
1261
42c6b129 1262static void bredr_setup(struct hci_request *req)
2177bab5 1263{
4ca048e3
MH
1264 struct hci_dev *hdev = req->hdev;
1265
2177bab5
JH
1266 __le16 param;
1267 __u8 flt_type;
1268
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1271
1272 /* Read Class of Device */
42c6b129 1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1274
1275 /* Read Local Name */
42c6b129 1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1277
1278 /* Read Voice Setting */
42c6b129 1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1280
b4cb9fb2
MH
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1283
4b836f39
MH
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1286
2177bab5
JH
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1290
1291 /* Connection accept timeout ~20 secs */
dcf4adbf 1292 param = cpu_to_le16(0x7d00);
42c6b129 1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1294
4ca048e3
MH
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1297 */
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1301 }
2177bab5
JH
1302}
1303
42c6b129 1304static void le_setup(struct hci_request *req)
2177bab5 1305{
c73eee91
JH
1306 struct hci_dev *hdev = req->hdev;
1307
2177bab5 1308 /* Read LE Buffer Size */
42c6b129 1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1310
1311 /* Read LE Local Supported Features */
42c6b129 1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 1313
747d3f03
MH
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1316
2177bab5 1317 /* Read LE Advertising Channel TX Power */
42c6b129 1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
1319
1320 /* Read LE White List Size */
42c6b129 1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5 1322
747d3f03
MH
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
c73eee91
JH
1325
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
1329}
1330
1331static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1332{
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1335
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1338
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1342
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1350 }
1351
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1355
1356 return 0x00;
1357}
1358
42c6b129 1359static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1360{
1361 u8 mode;
1362
42c6b129 1363 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1364
42c6b129 1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1366}
1367
42c6b129 1368static void hci_setup_event_mask(struct hci_request *req)
2177bab5 1369{
42c6b129
JH
1370 struct hci_dev *hdev = req->hdev;
1371
2177bab5
JH
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1375 */
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1377
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1380 */
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1383
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
c7882cbd
MH
1390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
2177bab5
JH
1402 }
1403
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1409
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1412
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1415
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1418
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1421
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1433 */
1434 }
1435
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1438
42c6b129 1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
1440}
1441
42c6b129 1442static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1443{
42c6b129
JH
1444 struct hci_dev *hdev = req->hdev;
1445
2177bab5 1446 if (lmp_bredr_capable(hdev))
42c6b129 1447 bredr_setup(req);
56f87901
JH
1448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1450
1451 if (lmp_le_capable(hdev))
42c6b129 1452 le_setup(req);
2177bab5 1453
42c6b129 1454 hci_setup_event_mask(req);
2177bab5 1455
3f8e2d75
JH
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1458 */
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1461
1462 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1468 */
1469 hdev->max_page = 0x01;
1470
2177bab5
JH
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
42c6b129
JH
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
2177bab5
JH
1475 } else {
1476 struct hci_cp_write_eir cp;
1477
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1480
42c6b129 1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1482 }
1483 }
1484
1485 if (lmp_inq_rssi_capable(hdev))
42c6b129 1486 hci_setup_inquiry_mode(req);
2177bab5
JH
1487
1488 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1490
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1493
1494 cp.page = 0x01;
42c6b129
JH
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
2177bab5
JH
1497 }
1498
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
42c6b129
JH
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
2177bab5
JH
1503 }
1504}
1505
42c6b129 1506static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1507{
42c6b129 1508 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
42c6b129 1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1523}
1524
42c6b129 1525static void hci_set_le_support(struct hci_request *req)
2177bab5 1526{
42c6b129 1527 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1528 struct hci_cp_write_le_host_supported cp;
1529
c73eee91
JH
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
2177bab5
JH
1534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
2177bab5
JH
1544}
1545
d62e6d67
JH
1546static void hci_set_event_mask_page_2(struct hci_request *req)
1547{
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1553 */
53b834d2 1554 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1559 }
1560
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1563 */
53b834d2 1564 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1569 }
1570
40c59fcb
MH
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1574
d62e6d67
JH
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576}
1577
42c6b129 1578static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1579{
42c6b129 1580 struct hci_dev *hdev = req->hdev;
d2c5d77f 1581 u8 p;
42c6b129 1582
b8f4e068
GP
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1586 *
1587 * Check the supported commands and only if the the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
f9f462fa
MH
1591 *
1592 * Some controllers indicate that they support handling deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
637b4cae 1595 */
f9f462fa
MH
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1598 struct hci_cp_delete_stored_link_key cp;
1599
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1604 }
1605
2177bab5 1606 if (hdev->commands[5] & 0x10)
42c6b129 1607 hci_setup_link_policy(req);
2177bab5 1608
9193c6e8
AG
1609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1611
1612 memset(events, 0, sizeof(events));
1613 events[0] = 0x1f;
662bc2e6
AG
1614
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1617 */
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1621 */
1622
9193c6e8
AG
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1625
42c6b129 1626 hci_set_le_support(req);
9193c6e8 1627 }
d2c5d77f
JH
1628
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1632
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1636 }
2177bab5
JH
1637}
1638
5d4e7e8d
JH
1639static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640{
1641 struct hci_dev *hdev = req->hdev;
1642
d62e6d67
JH
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1646
5d4e7e8d 1647 /* Check for Synchronization Train support */
53b834d2 1648 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1650
1651 /* Enable Secure Connections if supported and configured */
5afeac14 1652 if ((lmp_sc_capable(hdev) ||
111902f7 1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
a6d0d690
MH
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1658 }
5d4e7e8d
JH
1659}
1660
2177bab5
JH
1661static int __hci_init(struct hci_dev *hdev)
1662{
1663 int err;
1664
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1668
4b4148e9
MH
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1671 */
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1675 }
1676
2177bab5
JH
1677 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1679 * first stage init.
1680 */
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1683
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1687
5d4e7e8d
JH
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1691
baf27f6e
MH
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1695
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1698 */
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1701
dfb826a8
MH
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
ceeb3bc0
MH
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
47219839
MH
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
31ad1691
AK
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1716
baf27f6e
MH
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
02d08d15
MH
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
babdbb3c
MH
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
041000b9
MH
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
baf27f6e
MH
1726 }
1727
06f5b778 1728 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
5afeac14
MH
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
134c2a89
MH
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
06f5b778 1735 }
ebd1e33b 1736
2bfa3531
MH
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1744 }
1745
d0f729b8 1746 if (lmp_le_capable(hdev)) {
ac345813
MH
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
b32bba6c
MH
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1755
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1759 */
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1764
d0f729b8
MH
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
d2ab0ac1
MH
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
3698d704
MH
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
8f8625cd
MH
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
4e70c7e7
MH
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
816a93d1
MH
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
f1649577
MH
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
3f959d46
MH
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
0b3c7d37
MH
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
b9a7a61e
LR
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
d0f729b8 1789 }
e7b8fc92 1790
baf27f6e 1791 return 0;
2177bab5
JH
1792}
1793
42c6b129 1794static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1795{
1796 __u8 scan = opt;
1797
42c6b129 1798 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1799
1800 /* Inquiry and Page scans */
42c6b129 1801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1802}
1803
42c6b129 1804static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1805{
1806 __u8 auth = opt;
1807
42c6b129 1808 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1809
1810 /* Authentication */
42c6b129 1811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1812}
1813
42c6b129 1814static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1815{
1816 __u8 encrypt = opt;
1817
42c6b129 1818 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1819
e4e8e37c 1820 /* Encryption */
42c6b129 1821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1822}
1823
42c6b129 1824static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1825{
1826 __le16 policy = cpu_to_le16(opt);
1827
42c6b129 1828 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1829
1830 /* Default link policy */
42c6b129 1831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1832}
1833
8e87d142 1834/* Get HCI device by index.
1da177e4
LT
1835 * Device is held on return. */
1836struct hci_dev *hci_dev_get(int index)
1837{
8035ded4 1838 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1839
1840 BT_DBG("%d", index);
1841
1842 if (index < 0)
1843 return NULL;
1844
1845 read_lock(&hci_dev_list_lock);
8035ded4 1846 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1849 break;
1850 }
1851 }
1852 read_unlock(&hci_dev_list_lock);
1853 return hdev;
1854}
1da177e4
LT
1855
1856/* ---- Inquiry support ---- */
ff9ef578 1857
30dc78e1
JH
1858bool hci_discovery_active(struct hci_dev *hdev)
1859{
1860 struct discovery_state *discov = &hdev->discovery;
1861
6fbe195d 1862 switch (discov->state) {
343f935b 1863 case DISCOVERY_FINDING:
6fbe195d 1864 case DISCOVERY_RESOLVING:
30dc78e1
JH
1865 return true;
1866
6fbe195d
AG
1867 default:
1868 return false;
1869 }
30dc78e1
JH
1870}
1871
ff9ef578
JH
1872void hci_discovery_set_state(struct hci_dev *hdev, int state)
1873{
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1875
1876 if (hdev->discovery.state == state)
1877 return;
1878
1879 switch (state) {
1880 case DISCOVERY_STOPPED:
c54c3860
AG
1881 hci_update_background_scan(hdev);
1882
7b99b659
AG
1883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
ff9ef578
JH
1885 break;
1886 case DISCOVERY_STARTING:
1887 break;
343f935b 1888 case DISCOVERY_FINDING:
ff9ef578
JH
1889 mgmt_discovering(hdev, 1);
1890 break;
30dc78e1
JH
1891 case DISCOVERY_RESOLVING:
1892 break;
ff9ef578
JH
1893 case DISCOVERY_STOPPING:
1894 break;
1895 }
1896
1897 hdev->discovery.state = state;
1898}
1899
1f9b9a5d 1900void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1901{
30883512 1902 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1903 struct inquiry_entry *p, *n;
1da177e4 1904
561aafbc
JH
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
b57c1a56 1907 kfree(p);
1da177e4 1908 }
561aafbc
JH
1909
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1912}
1913
a8c5fb1a
GP
1914struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1da177e4 1916{
30883512 1917 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1918 struct inquiry_entry *e;
1919
6ed93dc6 1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1921
561aafbc
JH
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1925 }
1926
1927 return NULL;
1928}
1929
1930struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1931 bdaddr_t *bdaddr)
561aafbc 1932{
30883512 1933 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1934 struct inquiry_entry *e;
1935
6ed93dc6 1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1937
1938 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1939 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1940 return e;
1941 }
1942
1943 return NULL;
1da177e4
LT
1944}
1945
30dc78e1 1946struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1947 bdaddr_t *bdaddr,
1948 int state)
30dc78e1
JH
1949{
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1952
6ed93dc6 1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1954
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963}
1964
a3d4e20a 1965void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1966 struct inquiry_entry *ie)
a3d4e20a
JH
1967{
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1971
1972 list_del(&ie->list);
1973
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1976 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1977 break;
1978 pos = &p->list;
1979 }
1980
1981 list_add(&ie->list, pos);
1982}
1983
af58925c
MH
1984u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1da177e4 1986{
30883512 1987 struct discovery_state *cache = &hdev->discovery;
70f23020 1988 struct inquiry_entry *ie;
af58925c 1989 u32 flags = 0;
1da177e4 1990
6ed93dc6 1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1992
2b2fec4d
SJ
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
af58925c
MH
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1997
70f23020 1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1999 if (ie) {
af58925c
MH
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2002
a3d4e20a 2003 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2004 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2007 }
2008
561aafbc 2009 goto update;
a3d4e20a 2010 }
561aafbc
JH
2011
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2017 }
561aafbc
JH
2018
2019 list_add(&ie->all, &cache->all);
2020
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2026 }
70f23020 2027
561aafbc
JH
2028update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2030 ie->name_state != NAME_PENDING) {
561aafbc
JH
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
1da177e4
LT
2033 }
2034
70f23020
AE
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
1da177e4 2037 cache->timestamp = jiffies;
3175405b
JH
2038
2039 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2041
af58925c
MH
2042done:
2043 return flags;
1da177e4
LT
2044}
2045
2046static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047{
30883512 2048 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2052
561aafbc 2053 list_for_each_entry(e, &cache->all, all) {
1da177e4 2054 struct inquiry_data *data = &e->data;
b57c1a56
JH
2055
2056 if (copied >= num)
2057 break;
2058
1da177e4
LT
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
b57c1a56 2065
1da177e4 2066 info++;
b57c1a56 2067 copied++;
1da177e4
LT
2068 }
2069
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2072}
2073
42c6b129 2074static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2075{
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2077 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2078 struct hci_cp_inquiry cp;
2079
2080 BT_DBG("%s", hdev->name);
2081
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2084
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
42c6b129 2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2090}
2091
3e13fa1e
AG
2092static int wait_inquiry(void *word)
2093{
2094 schedule();
2095 return signal_pending(current);
2096}
2097
1da177e4
LT
2098int hci_inquiry(void __user *arg)
2099{
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2106
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2109
5a08ecce
AE
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
1da177e4
LT
2112 return -ENODEV;
2113
0736cfa8
MH
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2117 }
2118
4a964404 2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
5b69bef5
MH
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2127 }
2128
56f87901
JH
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2132 }
2133
09fd0de5 2134 hci_dev_lock(hdev);
8e87d142 2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2137 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2138 do_inquiry = 1;
2139 }
09fd0de5 2140 hci_dev_unlock(hdev);
1da177e4 2141
04837f64 2142 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2143
2144 if (do_inquiry) {
01178cd4
JH
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
70f23020
AE
2147 if (err < 0)
2148 goto done;
3e13fa1e
AG
2149
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2152 */
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
70f23020 2156 }
1da177e4 2157
8fc9ced3
GP
2158 /* for unlimited number of responses we will use buffer with
2159 * 255 entries
2160 */
1da177e4
LT
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
2163 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2164 * copy it to the user space.
2165 */
01df8c31 2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2167 if (!buf) {
1da177e4
LT
2168 err = -ENOMEM;
2169 goto done;
2170 }
2171
09fd0de5 2172 hci_dev_lock(hdev);
1da177e4 2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2174 hci_dev_unlock(hdev);
1da177e4
LT
2175
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2181 ir.num_rsp))
1da177e4 2182 err = -EFAULT;
8e87d142 2183 } else
1da177e4
LT
2184 err = -EFAULT;
2185
2186 kfree(buf);
2187
2188done:
2189 hci_dev_put(hdev);
2190 return err;
2191}
2192
/* Power on an HCI device: transport open, optional vendor setup stage,
 * optional public-address programming, HCI init sequence and mgmt
 * power notification. On any init failure all queues/works are flushed
 * and the transport is closed again.
 *
 * Returns 0 on success or a negative error code.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* A device that is going away can no longer be opened */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport; failure here needs no further cleanup */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the one-time setup stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	/* If public address change is configured, ensure that the
	 * address gets programmed. If the driver does not support
	 * changing the public address, fail the power on procedure.
	 */
	if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
		if (hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		/* Unconfigured and user-channel devices skip the standard
		 * HCI init command sequence.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		/* Keep only the HCI_RAW bit across the failed open */
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2309
cbed0ca1
JH
2310/* ---- HCI ioctl helpers ---- */
2311
/* HCIDEVUP ioctl helper: look up the device, make sure no pending
 * power on/off work races with us, then bring the device up.
 *
 * Returns 0 on success or a negative error code.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2356
1da177e4
LT
/* Power down an HCI device: cancel all deferred work, flush queues and
 * caches, optionally issue HCI_Reset (quirk-dependent), close the
 * transport and reset the volatile state. The teardown order matters:
 * works are flushed before queues are purged, and the cmd timer is
 * cancelled before the last sent command is dropped.
 *
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing to do if the device was already down */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drops the reference taken by hci_dev_hold() in do_open */
	hci_dev_put(hdev);
	return 0;
}
2459
/* HCIDEVDOWN ioctl helper: power down the device unless it is owned by
 * a user channel. Returns 0 on success or a negative error code.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close supersedes any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2483
/* HCIDEVRESET ioctl helper: drop all queued traffic, flush the inquiry
 * cache and connection hash, and issue HCI_Reset to the controller.
 * Requires the device to be up and kernel-owned (not user channel,
 * not unconfigured). Returns 0 on success or a negative error code.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow-control counters to their post-reset defaults */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2532
2533int hci_dev_reset_stat(__u16 dev)
2534{
2535 struct hci_dev *hdev;
2536 int ret = 0;
2537
70f23020
AE
2538 hdev = hci_dev_get(dev);
2539 if (!hdev)
1da177e4
LT
2540 return -ENODEV;
2541
0736cfa8
MH
2542 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543 ret = -EBUSY;
2544 goto done;
2545 }
2546
4a964404 2547 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2548 ret = -EOPNOTSUPP;
2549 goto done;
2550 }
2551
1da177e4
LT
2552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
0736cfa8 2554done:
1da177e4 2555 hci_dev_put(hdev);
1da177e4
LT
2556 return ret;
2557}
2558
/* Dispatch the legacy HCISET* ioctls (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU). Only allowed on kernel-owned,
 * configured BR/EDR controllers with BR/EDR enabled.
 *
 * Returns 0 on success or a negative error code.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values (MTU and packet count);
		 * NOTE(review): which half is which depends on host
		 * endianness of the 32-bit dev_opt — matches the legacy
		 * userspace ABI as-is.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2654
/* HCIGETDEVLIST ioctl helper: copy the id and flags of up to the
 * requested number of registered devices back to user space.
 * Returns 0 on success or a negative error code.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumerating a device keeps it up: cancel auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable to be set */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2701
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for
 * the requested device and copy it to user space.
 * Returns 0 on success or a negative error code.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device keeps it up: cancel pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable to be set */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields; SCO is not applicable for them.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2750
2751/* ---- Interface to HCI drivers ---- */
2752
611b30f7
MH
2753static int hci_rfkill_set_block(void *data, bool blocked)
2754{
2755 struct hci_dev *hdev = data;
2756
2757 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2758
0736cfa8
MH
2759 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2760 return -EBUSY;
2761
5e130367
JH
2762 if (blocked) {
2763 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2764 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2765 hci_dev_do_close(hdev);
5e130367
JH
2766 } else {
2767 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2768 }
611b30f7
MH
2769
2770 return 0;
2771}
2772
/* rfkill callbacks: only blocking/unblocking is of interest here */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2776
ab81cbf9
JH
/* Deferred power-on handler (hdev->power_on work). Opens the device,
 * then re-checks conditions that were deliberately ignored during the
 * setup stage (rfkill, unconfigured, missing address) and powers the
 * device back off if any still apply; otherwise arms the auto-off
 * timer when requested. Finally announces the index to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigued Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	}
}
2823
2824static void hci_power_off(struct work_struct *work)
2825{
3243553f 2826 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2827 power_off.work);
ab81cbf9
JH
2828
2829 BT_DBG("%s", hdev->name);
2830
8ee56540 2831 hci_dev_do_close(hdev);
ab81cbf9
JH
2832}
2833
16ab91ab
JH
2834static void hci_discov_off(struct work_struct *work)
2835{
2836 struct hci_dev *hdev;
16ab91ab
JH
2837
2838 hdev = container_of(work, struct hci_dev, discov_off.work);
2839
2840 BT_DBG("%s", hdev->name);
2841
d1967ff8 2842 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2843}
2844
35f7498a 2845void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2846{
4821002c 2847 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2848
4821002c
JH
2849 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2850 list_del(&uuid->list);
2aeb9a1a
JH
2851 kfree(uuid);
2852 }
2aeb9a1a
JH
2853}
2854
35f7498a 2855void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2856{
2857 struct list_head *p, *n;
2858
2859 list_for_each_safe(p, n, &hdev->link_keys) {
2860 struct link_key *key;
2861
2862 key = list_entry(p, struct link_key, list);
2863
2864 list_del(p);
2865 kfree(key);
2866 }
55ed8ca1
JH
2867}
2868
35f7498a 2869void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2870{
2871 struct smp_ltk *k, *tmp;
2872
2873 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2874 list_del(&k->list);
2875 kfree(k);
2876 }
b899efaf
VCG
2877}
2878
970c4e46
JH
2879void hci_smp_irks_clear(struct hci_dev *hdev)
2880{
2881 struct smp_irk *k, *tmp;
2882
2883 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2884 list_del(&k->list);
2885 kfree(k);
2886 }
2887}
2888
55ed8ca1
JH
2889struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2890{
8035ded4 2891 struct link_key *k;
55ed8ca1 2892
8035ded4 2893 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2894 if (bacmp(bdaddr, &k->bdaddr) == 0)
2895 return k;
55ed8ca1
JH
2896
2897 return NULL;
2898}
2899
/* Decide whether a new link key should be stored persistently, based
 * on the key type and on the authentication requirements both sides
 * declared during pairing. The checks are ordered: earlier matches
 * take precedence over later ones.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2935
98a0b845
JH
2936static bool ltk_type_master(u8 type)
2937{
d97c9fb0 2938 return (type == SMP_LTK);
98a0b845
JH
2939}
2940
fe39c7b2 2941struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2942 bool master)
75d262c2 2943{
c9839a11 2944 struct smp_ltk *k;
75d262c2 2945
c9839a11 2946 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2947 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2948 continue;
2949
98a0b845
JH
2950 if (ltk_type_master(k->type) != master)
2951 continue;
2952
c9839a11 2953 return k;
75d262c2
VCG
2954 }
2955
2956 return NULL;
2957}
75d262c2 2958
c9839a11 2959struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2960 u8 addr_type, bool master)
75d262c2 2961{
c9839a11 2962 struct smp_ltk *k;
75d262c2 2963
c9839a11
VCG
2964 list_for_each_entry(k, &hdev->long_term_keys, list)
2965 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2966 bacmp(bdaddr, &k->bdaddr) == 0 &&
2967 ltk_type_master(k->type) == master)
75d262c2
VCG
2968 return k;
2969
2970 return NULL;
2971}
75d262c2 2972
970c4e46
JH
2973struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2974{
2975 struct smp_irk *irk;
2976
2977 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978 if (!bacmp(&irk->rpa, rpa))
2979 return irk;
2980 }
2981
2982 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2983 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2984 bacpy(&irk->rpa, rpa);
2985 return irk;
2986 }
2987 }
2988
2989 return NULL;
2990}
2991
2992struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2993 u8 addr_type)
2994{
2995 struct smp_irk *irk;
2996
6cfc9988
JH
2997 /* Identity Address must be public or static random */
2998 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2999 return NULL;
3000
970c4e46
JH
3001 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3002 if (addr_type == irk->addr_type &&
3003 bacmp(bdaddr, &irk->bdaddr) == 0)
3004 return irk;
3005 }
3006
3007 return NULL;
3008}
3009
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * @conn:       connection the key was created on, may be NULL
 *              (security mode 3 / key from an unknown connection)
 * @persistent: optional out-parameter; set to whether the key should
 *              be stored permanently (see hci_persistent_key())
 *
 * Returns the stored entry or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3056
ca9142b8 3057struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3058 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3059 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3060{
c9839a11 3061 struct smp_ltk *key, *old_key;
98a0b845 3062 bool master = ltk_type_master(type);
75d262c2 3063
98a0b845 3064 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3065 if (old_key)
75d262c2 3066 key = old_key;
c9839a11 3067 else {
0a14ab41 3068 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3069 if (!key)
ca9142b8 3070 return NULL;
c9839a11 3071 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3072 }
3073
75d262c2 3074 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3075 key->bdaddr_type = addr_type;
3076 memcpy(key->val, tk, sizeof(key->val));
3077 key->authenticated = authenticated;
3078 key->ediv = ediv;
fe39c7b2 3079 key->rand = rand;
c9839a11
VCG
3080 key->enc_size = enc_size;
3081 key->type = type;
75d262c2 3082
ca9142b8 3083 return key;
75d262c2
VCG
3084}
3085
ca9142b8
JH
/* Store (or refresh) the Identity Resolving Key for the identity
 * address @bdaddr/@addr_type; @rpa is the last-seen resolvable private
 * address (cached for hci_find_irk_by_rpa).
 * Returns the stored entry or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
3108
55ed8ca1
JH
3109int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3110{
3111 struct link_key *key;
3112
3113 key = hci_find_link_key(hdev, bdaddr);
3114 if (!key)
3115 return -ENOENT;
3116
6ed93dc6 3117 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3118
3119 list_del(&key->list);
3120 kfree(key);
3121
3122 return 0;
3123}
3124
e0b2b27e 3125int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3126{
3127 struct smp_ltk *k, *tmp;
c51ffa0b 3128 int removed = 0;
b899efaf
VCG
3129
3130 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3131 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3132 continue;
3133
6ed93dc6 3134 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3135
3136 list_del(&k->list);
3137 kfree(k);
c51ffa0b 3138 removed++;
b899efaf
VCG
3139 }
3140
c51ffa0b 3141 return removed ? 0 : -ENOENT;
b899efaf
VCG
3142}
3143
a7ec7338
JH
3144void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3145{
3146 struct smp_irk *k, *tmp;
3147
668b7b19 3148 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3149 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3150 continue;
3151
3152 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3153
3154 list_del(&k->list);
3155 kfree(k);
3156 }
3157}
3158
6bd32326 3159/* HCI command timer function */
/* HCI command timer (hdev->cmd_timer delayed work): fired when the
 * controller failed to answer the last command in time. Logs the stuck
 * opcode (if any), then resets the command credit to 1 and re-kicks
 * the command work so queued commands are not stalled forever.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3177
2763eda6 3178struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3179 bdaddr_t *bdaddr)
2763eda6
SJ
3180{
3181 struct oob_data *data;
3182
3183 list_for_each_entry(data, &hdev->remote_oob_data, list)
3184 if (bacmp(bdaddr, &data->bdaddr) == 0)
3185 return data;
3186
3187 return NULL;
3188}
3189
3190int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3191{
3192 struct oob_data *data;
3193
3194 data = hci_find_remote_oob_data(hdev, bdaddr);
3195 if (!data)
3196 return -ENOENT;
3197
6ed93dc6 3198 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3199
3200 list_del(&data->list);
3201 kfree(data);
3202
3203 return 0;
3204}
3205
35f7498a 3206void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3207{
3208 struct oob_data *data, *n;
3209
3210 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3211 list_del(&data->list);
3212 kfree(data);
3213 }
2763eda6
SJ
3214}
3215
0798872e
MH
3216int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *hash, u8 *randomizer)
2763eda6
SJ
3218{
3219 struct oob_data *data;
3220
3221 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3222 if (!data) {
0a14ab41 3223 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3224 if (!data)
3225 return -ENOMEM;
3226
3227 bacpy(&data->bdaddr, bdaddr);
3228 list_add(&data->list, &hdev->remote_oob_data);
3229 }
3230
519ca9d0
MH
3231 memcpy(data->hash192, hash, sizeof(data->hash192));
3232 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3233
0798872e
MH
3234 memset(data->hash256, 0, sizeof(data->hash256));
3235 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3236
3237 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3238
3239 return 0;
3240}
3241
3242int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3243 u8 *hash192, u8 *randomizer192,
3244 u8 *hash256, u8 *randomizer256)
3245{
3246 struct oob_data *data;
3247
3248 data = hci_find_remote_oob_data(hdev, bdaddr);
3249 if (!data) {
0a14ab41 3250 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3251 if (!data)
3252 return -ENOMEM;
3253
3254 bacpy(&data->bdaddr, bdaddr);
3255 list_add(&data->list, &hdev->remote_oob_data);
3256 }
3257
3258 memcpy(data->hash192, hash192, sizeof(data->hash192));
3259 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3260
3261 memcpy(data->hash256, hash256, sizeof(data->hash256));
3262 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3263
6ed93dc6 3264 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3265
3266 return 0;
3267}
3268
b9ee0a78
MH
3269struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3270 bdaddr_t *bdaddr, u8 type)
b2a66aad 3271{
8035ded4 3272 struct bdaddr_list *b;
b2a66aad 3273
b9ee0a78
MH
3274 list_for_each_entry(b, &hdev->blacklist, list) {
3275 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3276 return b;
b9ee0a78 3277 }
b2a66aad
AJ
3278
3279 return NULL;
3280}
3281
c9507490 3282static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3283{
3284 struct list_head *p, *n;
3285
3286 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3287 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3288
3289 list_del(p);
3290 kfree(b);
3291 }
b2a66aad
AJ
3292}
3293
88c1fe4b 3294int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3295{
3296 struct bdaddr_list *entry;
b2a66aad 3297
b9ee0a78 3298 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3299 return -EBADF;
3300
b9ee0a78 3301 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3302 return -EEXIST;
b2a66aad
AJ
3303
3304 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3305 if (!entry)
3306 return -ENOMEM;
b2a66aad
AJ
3307
3308 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3309 entry->bdaddr_type = type;
b2a66aad
AJ
3310
3311 list_add(&entry->list, &hdev->blacklist);
3312
2a8357f2 3313 return 0;
b2a66aad
AJ
3314}
3315
88c1fe4b 3316int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3317{
3318 struct bdaddr_list *entry;
b2a66aad 3319
35f7498a
JH
3320 if (!bacmp(bdaddr, BDADDR_ANY)) {
3321 hci_blacklist_clear(hdev);
3322 return 0;
3323 }
b2a66aad 3324
b9ee0a78 3325 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3326 if (!entry)
5e762444 3327 return -ENOENT;
b2a66aad
AJ
3328
3329 list_del(&entry->list);
3330 kfree(entry);
3331
2a8357f2 3332 return 0;
b2a66aad
AJ
3333}
3334
d2ab0ac1
MH
3335struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3336 bdaddr_t *bdaddr, u8 type)
3337{
3338 struct bdaddr_list *b;
3339
3340 list_for_each_entry(b, &hdev->le_white_list, list) {
3341 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3342 return b;
3343 }
3344
3345 return NULL;
3346}
3347
3348void hci_white_list_clear(struct hci_dev *hdev)
3349{
3350 struct list_head *p, *n;
3351
3352 list_for_each_safe(p, n, &hdev->le_white_list) {
3353 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3354
3355 list_del(p);
3356 kfree(b);
3357 }
3358}
3359
3360int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3361{
3362 struct bdaddr_list *entry;
3363
3364 if (!bacmp(bdaddr, BDADDR_ANY))
3365 return -EBADF;
3366
3367 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3368 if (!entry)
3369 return -ENOMEM;
3370
3371 bacpy(&entry->bdaddr, bdaddr);
3372 entry->bdaddr_type = type;
3373
3374 list_add(&entry->list, &hdev->le_white_list);
3375
3376 return 0;
3377}
3378
3379int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3380{
3381 struct bdaddr_list *entry;
3382
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3384 return -EBADF;
3385
3386 entry = hci_white_list_lookup(hdev, bdaddr, type);
3387 if (!entry)
3388 return -ENOENT;
3389
3390 list_del(&entry->list);
3391 kfree(entry);
3392
3393 return 0;
3394}
3395
15819a70
AG
3396/* This function requires the caller holds hdev->lock */
3397struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3398 bdaddr_t *addr, u8 addr_type)
3399{
3400 struct hci_conn_params *params;
3401
3402 list_for_each_entry(params, &hdev->le_conn_params, list) {
3403 if (bacmp(&params->addr, addr) == 0 &&
3404 params->addr_type == addr_type) {
3405 return params;
3406 }
3407 }
3408
3409 return NULL;
3410}
3411
cef952ce
AG
3412static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3413{
3414 struct hci_conn *conn;
3415
3416 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3417 if (!conn)
3418 return false;
3419
3420 if (conn->dst_type != type)
3421 return false;
3422
3423 if (conn->state != BT_CONNECTED)
3424 return false;
3425
3426 return true;
3427}
3428
4b10966f
MH
3429/* This function requires the caller holds hdev->lock */
3430struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3431 bdaddr_t *addr, u8 addr_type)
3432{
3433 struct bdaddr_list *entry;
3434
3435 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3436 if (bacmp(&entry->bdaddr, addr) == 0 &&
3437 entry->bdaddr_type == addr_type)
3438 return entry;
3439 }
3440
3441 return NULL;
3442}
3443
3444/* This function requires the caller holds hdev->lock */
3445void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3446{
3447 struct bdaddr_list *entry;
3448
3449 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3450 if (entry)
3451 goto done;
3452
3453 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3454 if (!entry) {
3455 BT_ERR("Out of memory");
3456 return;
3457 }
3458
3459 bacpy(&entry->bdaddr, addr);
3460 entry->bdaddr_type = addr_type;
3461
3462 list_add(&entry->list, &hdev->pend_le_conns);
3463
3464 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3465
3466done:
3467 hci_update_background_scan(hdev);
3468}
3469
3470/* This function requires the caller holds hdev->lock */
3471void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3472{
3473 struct bdaddr_list *entry;
3474
3475 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3476 if (!entry)
3477 goto done;
3478
3479 list_del(&entry->list);
3480 kfree(entry);
3481
3482 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3483
3484done:
3485 hci_update_background_scan(hdev);
3486}
3487
3488/* This function requires the caller holds hdev->lock */
3489void hci_pend_le_conns_clear(struct hci_dev *hdev)
3490{
3491 struct bdaddr_list *entry, *tmp;
3492
3493 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3494 list_del(&entry->list);
3495 kfree(entry);
3496 }
3497
3498 BT_DBG("All LE pending connections cleared");
1c1697c0
MH
3499
3500 hci_update_background_scan(hdev);
4b10966f
MH
3501}
3502
15819a70 3503/* This function requires the caller holds hdev->lock */
51d167c0
MH
3504struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3505 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3506{
3507 struct hci_conn_params *params;
3508
c46245b3 3509 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3510 return NULL;
bf5b3c8b
MH
3511
3512 params = hci_conn_params_lookup(hdev, addr, addr_type);
3513 if (params)
51d167c0 3514 return params;
bf5b3c8b
MH
3515
3516 params = kzalloc(sizeof(*params), GFP_KERNEL);
3517 if (!params) {
3518 BT_ERR("Out of memory");
51d167c0 3519 return NULL;
bf5b3c8b
MH
3520 }
3521
3522 bacpy(&params->addr, addr);
3523 params->addr_type = addr_type;
3524
3525 list_add(&params->list, &hdev->le_conn_params);
3526
3527 params->conn_min_interval = hdev->le_conn_min_interval;
3528 params->conn_max_interval = hdev->le_conn_max_interval;
3529 params->conn_latency = hdev->le_conn_latency;
3530 params->supervision_timeout = hdev->le_supv_timeout;
3531 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3532
3533 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3534
51d167c0 3535 return params;
bf5b3c8b
MH
3536}
3537
3538/* This function requires the caller holds hdev->lock */
3539int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3540 u8 auto_connect)
15819a70
AG
3541{
3542 struct hci_conn_params *params;
3543
8c87aae1
MH
3544 params = hci_conn_params_add(hdev, addr, addr_type);
3545 if (!params)
3546 return -EIO;
cef952ce 3547
851efca8
JH
3548 if (params->auto_connect == HCI_AUTO_CONN_REPORT &&
3549 auto_connect != HCI_AUTO_CONN_REPORT)
3550 hdev->pend_le_reports--;
15819a70 3551
cef952ce
AG
3552 switch (auto_connect) {
3553 case HCI_AUTO_CONN_DISABLED:
3554 case HCI_AUTO_CONN_LINK_LOSS:
3555 hci_pend_le_conn_del(hdev, addr, addr_type);
3556 break;
851efca8
JH
3557 case HCI_AUTO_CONN_REPORT:
3558 if (params->auto_connect != HCI_AUTO_CONN_REPORT)
3559 hdev->pend_le_reports++;
3560 hci_pend_le_conn_del(hdev, addr, addr_type);
3561 break;
cef952ce
AG
3562 case HCI_AUTO_CONN_ALWAYS:
3563 if (!is_connected(hdev, addr, addr_type))
3564 hci_pend_le_conn_add(hdev, addr, addr_type);
3565 break;
3566 }
15819a70 3567
851efca8
JH
3568 params->auto_connect = auto_connect;
3569
d06b50ce
MH
3570 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3571 auto_connect);
a9b0a04c
AG
3572
3573 return 0;
15819a70
AG
3574}
3575
3576/* This function requires the caller holds hdev->lock */
3577void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3578{
3579 struct hci_conn_params *params;
3580
3581 params = hci_conn_params_lookup(hdev, addr, addr_type);
3582 if (!params)
3583 return;
3584
851efca8
JH
3585 if (params->auto_connect == HCI_AUTO_CONN_REPORT)
3586 hdev->pend_le_reports--;
3587
cef952ce
AG
3588 hci_pend_le_conn_del(hdev, addr, addr_type);
3589
15819a70
AG
3590 list_del(&params->list);
3591 kfree(params);
3592
3593 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3594}
3595
55af49a8
JH
3596/* This function requires the caller holds hdev->lock */
3597void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3598{
3599 struct hci_conn_params *params, *tmp;
3600
3601 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3602 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3603 continue;
3604 list_del(&params->list);
3605 kfree(params);
3606 }
3607
3608 BT_DBG("All LE disabled connection parameters were removed");
3609}
3610
3611/* This function requires the caller holds hdev->lock */
3612void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3613{
3614 struct hci_conn_params *params, *tmp;
3615
3616 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3617 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3618 continue;
851efca8
JH
3619 if (params->auto_connect == HCI_AUTO_CONN_REPORT)
3620 hdev->pend_le_reports--;
55af49a8
JH
3621 list_del(&params->list);
3622 kfree(params);
3623 }
3624
3625 hci_pend_le_conns_clear(hdev);
3626
3627 BT_DBG("All enabled LE connection parameters were removed");
3628}
3629
15819a70 3630/* This function requires the caller holds hdev->lock */
373110c5 3631void hci_conn_params_clear_all(struct hci_dev *hdev)
15819a70
AG
3632{
3633 struct hci_conn_params *params, *tmp;
3634
3635 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3636 list_del(&params->list);
3637 kfree(params);
3638 }
3639
1089b67d
MH
3640 hci_pend_le_conns_clear(hdev);
3641
15819a70
AG
3642 BT_DBG("All LE connection parameters were removed");
3643}
3644
4c87eaab 3645static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3646{
4c87eaab
AG
3647 if (status) {
3648 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3649
4c87eaab
AG
3650 hci_dev_lock(hdev);
3651 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3652 hci_dev_unlock(hdev);
3653 return;
3654 }
7ba8b4be
AG
3655}
3656
4c87eaab 3657static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3658{
4c87eaab
AG
3659 /* General inquiry access code (GIAC) */
3660 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3661 struct hci_request req;
3662 struct hci_cp_inquiry cp;
7ba8b4be
AG
3663 int err;
3664
4c87eaab
AG
3665 if (status) {
3666 BT_ERR("Failed to disable LE scanning: status %d", status);
3667 return;
3668 }
7ba8b4be 3669
4c87eaab
AG
3670 switch (hdev->discovery.type) {
3671 case DISCOV_TYPE_LE:
3672 hci_dev_lock(hdev);
3673 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3674 hci_dev_unlock(hdev);
3675 break;
7ba8b4be 3676
4c87eaab
AG
3677 case DISCOV_TYPE_INTERLEAVED:
3678 hci_req_init(&req, hdev);
7ba8b4be 3679
4c87eaab
AG
3680 memset(&cp, 0, sizeof(cp));
3681 memcpy(&cp.lap, lap, sizeof(cp.lap));
3682 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3683 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3684
4c87eaab 3685 hci_dev_lock(hdev);
7dbfac1d 3686
4c87eaab 3687 hci_inquiry_cache_flush(hdev);
7dbfac1d 3688
4c87eaab
AG
3689 err = hci_req_run(&req, inquiry_complete);
3690 if (err) {
3691 BT_ERR("Inquiry request failed: err %d", err);
3692 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3693 }
7dbfac1d 3694
4c87eaab
AG
3695 hci_dev_unlock(hdev);
3696 break;
7dbfac1d 3697 }
7dbfac1d
AG
3698}
3699
7ba8b4be
AG
3700static void le_scan_disable_work(struct work_struct *work)
3701{
3702 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3703 le_scan_disable.work);
4c87eaab
AG
3704 struct hci_request req;
3705 int err;
7ba8b4be
AG
3706
3707 BT_DBG("%s", hdev->name);
3708
4c87eaab 3709 hci_req_init(&req, hdev);
28b75a89 3710
b1efcc28 3711 hci_req_add_le_scan_disable(&req);
28b75a89 3712
4c87eaab
AG
3713 err = hci_req_run(&req, le_scan_disable_work_complete);
3714 if (err)
3715 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3716}
3717
8d97250e
JH
3718static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3719{
3720 struct hci_dev *hdev = req->hdev;
3721
3722 /* If we're advertising or initiating an LE connection we can't
3723 * go ahead and change the random address at this time. This is
3724 * because the eventual initiator address used for the
3725 * subsequently created connection will be undefined (some
3726 * controllers use the new address and others the one we had
3727 * when the operation started).
3728 *
3729 * In this kind of scenario skip the update and let the random
3730 * address be updated at the next cycle.
3731 */
3732 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3733 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3734 BT_DBG("Deferring random address update");
3735 return;
3736 }
3737
3738 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3739}
3740
94b1fc92
MH
3741int hci_update_random_address(struct hci_request *req, bool require_privacy,
3742 u8 *own_addr_type)
ebd3a747
JH
3743{
3744 struct hci_dev *hdev = req->hdev;
3745 int err;
3746
3747 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3748 * current RPA has expired or there is something else than
3749 * the current RPA in use, then generate a new one.
ebd3a747
JH
3750 */
3751 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3752 int to;
3753
3754 *own_addr_type = ADDR_LE_DEV_RANDOM;
3755
3756 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3757 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3758 return 0;
3759
2b5224dc 3760 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3761 if (err < 0) {
3762 BT_ERR("%s failed to generate new RPA", hdev->name);
3763 return err;
3764 }
3765
8d97250e 3766 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3767
3768 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3769 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3770
3771 return 0;
94b1fc92
MH
3772 }
3773
3774 /* In case of required privacy without resolvable private address,
3775 * use an unresolvable private address. This is useful for active
3776 * scanning and non-connectable advertising.
3777 */
3778 if (require_privacy) {
3779 bdaddr_t urpa;
3780
3781 get_random_bytes(&urpa, 6);
3782 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3783
3784 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3785 set_random_addr(req, &urpa);
94b1fc92 3786 return 0;
ebd3a747
JH
3787 }
3788
3789 /* If forcing static address is in use or there is no public
3790 * address use the static address as random address (but skip
3791 * the HCI command if the current random address is already the
3792 * static one.
3793 */
111902f7 3794 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3795 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3796 *own_addr_type = ADDR_LE_DEV_RANDOM;
3797 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3798 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3799 &hdev->static_addr);
3800 return 0;
3801 }
3802
3803 /* Neither privacy nor static address is being used so use a
3804 * public address.
3805 */
3806 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3807
3808 return 0;
3809}
3810
a1f4c318
JH
3811/* Copy the Identity Address of the controller.
3812 *
3813 * If the controller has a public BD_ADDR, then by default use that one.
3814 * If this is a LE only controller without a public address, default to
3815 * the static random address.
3816 *
3817 * For debugging purposes it is possible to force controllers with a
3818 * public address to use the static random address instead.
3819 */
3820void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3821 u8 *bdaddr_type)
3822{
111902f7 3823 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3824 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3825 bacpy(bdaddr, &hdev->static_addr);
3826 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3827 } else {
3828 bacpy(bdaddr, &hdev->bdaddr);
3829 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3830 }
3831}
3832
9be0dab7
DH
3833/* Alloc HCI device */
3834struct hci_dev *hci_alloc_dev(void)
3835{
3836 struct hci_dev *hdev;
3837
3838 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3839 if (!hdev)
3840 return NULL;
3841
b1b813d4
DH
3842 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3843 hdev->esco_type = (ESCO_HV1);
3844 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3845 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3846 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3847 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3848 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3849 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3850
b1b813d4
DH
3851 hdev->sniff_max_interval = 800;
3852 hdev->sniff_min_interval = 80;
3853
3f959d46 3854 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3855 hdev->le_scan_interval = 0x0060;
3856 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3857 hdev->le_conn_min_interval = 0x0028;
3858 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3859 hdev->le_conn_latency = 0x0000;
3860 hdev->le_supv_timeout = 0x002a;
bef64738 3861
d6bfd59c 3862 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3863 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3864 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3865 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3866
b1b813d4
DH
3867 mutex_init(&hdev->lock);
3868 mutex_init(&hdev->req_lock);
3869
3870 INIT_LIST_HEAD(&hdev->mgmt_pending);
3871 INIT_LIST_HEAD(&hdev->blacklist);
3872 INIT_LIST_HEAD(&hdev->uuids);
3873 INIT_LIST_HEAD(&hdev->link_keys);
3874 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3875 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3876 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3877 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3878 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3879 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3880 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3881
3882 INIT_WORK(&hdev->rx_work, hci_rx_work);
3883 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3884 INIT_WORK(&hdev->tx_work, hci_tx_work);
3885 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3886
b1b813d4
DH
3887 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3888 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3889 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3890
b1b813d4
DH
3891 skb_queue_head_init(&hdev->rx_q);
3892 skb_queue_head_init(&hdev->cmd_q);
3893 skb_queue_head_init(&hdev->raw_q);
3894
3895 init_waitqueue_head(&hdev->req_wait_q);
3896
65cc2b49 3897 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3898
b1b813d4
DH
3899 hci_init_sysfs(hdev);
3900 discovery_init(hdev);
9be0dab7
DH
3901
3902 return hdev;
3903}
3904EXPORT_SYMBOL(hci_alloc_dev);
3905
3906/* Free HCI device */
3907void hci_free_dev(struct hci_dev *hdev)
3908{
9be0dab7
DH
3909 /* will free via device release */
3910 put_device(&hdev->dev);
3911}
3912EXPORT_SYMBOL(hci_free_dev);
3913
1da177e4
LT
3914/* Register HCI device */
3915int hci_register_dev(struct hci_dev *hdev)
3916{
b1b813d4 3917 int id, error;
1da177e4 3918
010666a1 3919 if (!hdev->open || !hdev->close)
1da177e4
LT
3920 return -EINVAL;
3921
08add513
MM
3922 /* Do not allow HCI_AMP devices to register at index 0,
3923 * so the index can be used as the AMP controller ID.
3924 */
3df92b31
SL
3925 switch (hdev->dev_type) {
3926 case HCI_BREDR:
3927 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3928 break;
3929 case HCI_AMP:
3930 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3931 break;
3932 default:
3933 return -EINVAL;
1da177e4 3934 }
8e87d142 3935
3df92b31
SL
3936 if (id < 0)
3937 return id;
3938
1da177e4
LT
3939 sprintf(hdev->name, "hci%d", id);
3940 hdev->id = id;
2d8b3a11
AE
3941
3942 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3943
d8537548
KC
3944 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3945 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3946 if (!hdev->workqueue) {
3947 error = -ENOMEM;
3948 goto err;
3949 }
f48fd9c8 3950
d8537548
KC
3951 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3952 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3953 if (!hdev->req_workqueue) {
3954 destroy_workqueue(hdev->workqueue);
3955 error = -ENOMEM;
3956 goto err;
3957 }
3958
0153e2ec
MH
3959 if (!IS_ERR_OR_NULL(bt_debugfs))
3960 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3961
bdc3e0f1
MH
3962 dev_set_name(&hdev->dev, "%s", hdev->name);
3963
99780a7b
JH
3964 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3965 CRYPTO_ALG_ASYNC);
3966 if (IS_ERR(hdev->tfm_aes)) {
3967 BT_ERR("Unable to create crypto context");
3968 error = PTR_ERR(hdev->tfm_aes);
3969 hdev->tfm_aes = NULL;
3970 goto err_wqueue;
3971 }
3972
bdc3e0f1 3973 error = device_add(&hdev->dev);
33ca954d 3974 if (error < 0)
99780a7b 3975 goto err_tfm;
1da177e4 3976
611b30f7 3977 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3978 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3979 hdev);
611b30f7
MH
3980 if (hdev->rfkill) {
3981 if (rfkill_register(hdev->rfkill) < 0) {
3982 rfkill_destroy(hdev->rfkill);
3983 hdev->rfkill = NULL;
3984 }
3985 }
3986
5e130367
JH
3987 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3988 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3989
a8b2d5c2 3990 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3991 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3992
01cd3404 3993 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3994 /* Assume BR/EDR support until proven otherwise (such as
3995 * through reading supported features during init.
3996 */
3997 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3998 }
ce2be9ac 3999
fcee3377
GP
4000 write_lock(&hci_dev_list_lock);
4001 list_add(&hdev->list, &hci_dev_list);
4002 write_unlock(&hci_dev_list_lock);
4003
4a964404
MH
4004 /* Devices that are marked for raw-only usage are unconfigured
4005 * and should not be included in normal operation.
fee746b0
MH
4006 */
4007 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4008 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4009
1da177e4 4010 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4011 hci_dev_hold(hdev);
1da177e4 4012
19202573 4013 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4014
1da177e4 4015 return id;
f48fd9c8 4016
99780a7b
JH
4017err_tfm:
4018 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4019err_wqueue:
4020 destroy_workqueue(hdev->workqueue);
6ead1bbc 4021 destroy_workqueue(hdev->req_workqueue);
33ca954d 4022err:
3df92b31 4023 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4024
33ca954d 4025 return error;
1da177e4
LT
4026}
4027EXPORT_SYMBOL(hci_register_dev);
4028
4029/* Unregister HCI device */
59735631 4030void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4031{
3df92b31 4032 int i, id;
ef222013 4033
c13854ce 4034 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4035
94324962
JH
4036 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4037
3df92b31
SL
4038 id = hdev->id;
4039
f20d09d5 4040 write_lock(&hci_dev_list_lock);
1da177e4 4041 list_del(&hdev->list);
f20d09d5 4042 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4043
4044 hci_dev_do_close(hdev);
4045
cd4c5391 4046 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4047 kfree_skb(hdev->reassembly[i]);
4048
b9b5ef18
GP
4049 cancel_work_sync(&hdev->power_on);
4050
ab81cbf9 4051 if (!test_bit(HCI_INIT, &hdev->flags) &&
0602a8ad 4052 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 4053 hci_dev_lock(hdev);
744cf19e 4054 mgmt_index_removed(hdev);
09fd0de5 4055 hci_dev_unlock(hdev);
56e5cb86 4056 }
ab81cbf9 4057
2e58ef3e
JH
4058 /* mgmt_index_removed should take care of emptying the
4059 * pending list */
4060 BUG_ON(!list_empty(&hdev->mgmt_pending));
4061
1da177e4
LT
4062 hci_notify(hdev, HCI_DEV_UNREG);
4063
611b30f7
MH
4064 if (hdev->rfkill) {
4065 rfkill_unregister(hdev->rfkill);
4066 rfkill_destroy(hdev->rfkill);
4067 }
4068
99780a7b
JH
4069 if (hdev->tfm_aes)
4070 crypto_free_blkcipher(hdev->tfm_aes);
4071
bdc3e0f1 4072 device_del(&hdev->dev);
147e2d59 4073
0153e2ec
MH
4074 debugfs_remove_recursive(hdev->debugfs);
4075
f48fd9c8 4076 destroy_workqueue(hdev->workqueue);
6ead1bbc 4077 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4078
09fd0de5 4079 hci_dev_lock(hdev);
e2e0cacb 4080 hci_blacklist_clear(hdev);
2aeb9a1a 4081 hci_uuids_clear(hdev);
55ed8ca1 4082 hci_link_keys_clear(hdev);
b899efaf 4083 hci_smp_ltks_clear(hdev);
970c4e46 4084 hci_smp_irks_clear(hdev);
2763eda6 4085 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4086 hci_white_list_clear(hdev);
373110c5 4087 hci_conn_params_clear_all(hdev);
09fd0de5 4088 hci_dev_unlock(hdev);
e2e0cacb 4089
dc946bd8 4090 hci_dev_put(hdev);
3df92b31
SL
4091
4092 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4093}
4094EXPORT_SYMBOL(hci_unregister_dev);
4095
4096/* Suspend HCI device */
4097int hci_suspend_dev(struct hci_dev *hdev)
4098{
4099 hci_notify(hdev, HCI_DEV_SUSPEND);
4100 return 0;
4101}
4102EXPORT_SYMBOL(hci_suspend_dev);
4103
4104/* Resume HCI device */
4105int hci_resume_dev(struct hci_dev *hdev)
4106{
4107 hci_notify(hdev, HCI_DEV_RESUME);
4108 return 0;
4109}
4110EXPORT_SYMBOL(hci_resume_dev);
4111
76bca880 4112/* Receive frame from HCI drivers */
e1a26170 4113int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4114{
76bca880 4115 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4116 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4117 kfree_skb(skb);
4118 return -ENXIO;
4119 }
4120
d82603c6 4121 /* Incoming skb */
76bca880
MH
4122 bt_cb(skb)->incoming = 1;
4123
4124 /* Time stamp */
4125 __net_timestamp(skb);
4126
76bca880 4127 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4128 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4129
76bca880
MH
4130 return 0;
4131}
4132EXPORT_SYMBOL(hci_recv_frame);
4133
33e882a5 4134static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4135 int count, __u8 index)
33e882a5
SS
4136{
4137 int len = 0;
4138 int hlen = 0;
4139 int remain = count;
4140 struct sk_buff *skb;
4141 struct bt_skb_cb *scb;
4142
4143 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4144 index >= NUM_REASSEMBLY)
33e882a5
SS
4145 return -EILSEQ;
4146
4147 skb = hdev->reassembly[index];
4148
4149 if (!skb) {
4150 switch (type) {
4151 case HCI_ACLDATA_PKT:
4152 len = HCI_MAX_FRAME_SIZE;
4153 hlen = HCI_ACL_HDR_SIZE;
4154 break;
4155 case HCI_EVENT_PKT:
4156 len = HCI_MAX_EVENT_SIZE;
4157 hlen = HCI_EVENT_HDR_SIZE;
4158 break;
4159 case HCI_SCODATA_PKT:
4160 len = HCI_MAX_SCO_SIZE;
4161 hlen = HCI_SCO_HDR_SIZE;
4162 break;
4163 }
4164
1e429f38 4165 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4166 if (!skb)
4167 return -ENOMEM;
4168
4169 scb = (void *) skb->cb;
4170 scb->expect = hlen;
4171 scb->pkt_type = type;
4172
33e882a5
SS
4173 hdev->reassembly[index] = skb;
4174 }
4175
4176 while (count) {
4177 scb = (void *) skb->cb;
89bb46d0 4178 len = min_t(uint, scb->expect, count);
33e882a5
SS
4179
4180 memcpy(skb_put(skb, len), data, len);
4181
4182 count -= len;
4183 data += len;
4184 scb->expect -= len;
4185 remain = count;
4186
4187 switch (type) {
4188 case HCI_EVENT_PKT:
4189 if (skb->len == HCI_EVENT_HDR_SIZE) {
4190 struct hci_event_hdr *h = hci_event_hdr(skb);
4191 scb->expect = h->plen;
4192
4193 if (skb_tailroom(skb) < scb->expect) {
4194 kfree_skb(skb);
4195 hdev->reassembly[index] = NULL;
4196 return -ENOMEM;
4197 }
4198 }
4199 break;
4200
4201 case HCI_ACLDATA_PKT:
4202 if (skb->len == HCI_ACL_HDR_SIZE) {
4203 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4204 scb->expect = __le16_to_cpu(h->dlen);
4205
4206 if (skb_tailroom(skb) < scb->expect) {
4207 kfree_skb(skb);
4208 hdev->reassembly[index] = NULL;
4209 return -ENOMEM;
4210 }
4211 }
4212 break;
4213
4214 case HCI_SCODATA_PKT:
4215 if (skb->len == HCI_SCO_HDR_SIZE) {
4216 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4217 scb->expect = h->dlen;
4218
4219 if (skb_tailroom(skb) < scb->expect) {
4220 kfree_skb(skb);
4221 hdev->reassembly[index] = NULL;
4222 return -ENOMEM;
4223 }
4224 }
4225 break;
4226 }
4227
4228 if (scb->expect == 0) {
4229 /* Complete frame */
4230
4231 bt_cb(skb)->pkt_type = type;
e1a26170 4232 hci_recv_frame(hdev, skb);
33e882a5
SS
4233
4234 hdev->reassembly[index] = NULL;
4235 return remain;
4236 }
4237 }
4238
4239 return remain;
4240}
4241
ef222013
MH
4242int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4243{
f39a3c06
SS
4244 int rem = 0;
4245
ef222013
MH
4246 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4247 return -EILSEQ;
4248
da5f6c37 4249 while (count) {
1e429f38 4250 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4251 if (rem < 0)
4252 return rem;
ef222013 4253
f39a3c06
SS
4254 data += (count - rem);
4255 count = rem;
f81c6224 4256 }
ef222013 4257
f39a3c06 4258 return rem;
ef222013
MH
4259}
4260EXPORT_SYMBOL(hci_recv_fragment);
4261
/* Single reassembly slot used for raw byte-stream transports (e.g. H4
 * style UARTs) where the packet type indicator is in-band.
 */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI frames from an untyped byte stream.
 *
 * The first byte of every frame carries the packet type; once a frame is
 * in progress the type is taken from the partially reassembled skb.
 *
 * Returns the number of unconsumed bytes or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the in-band type byte */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4296
1da177e4
LT
4297/* ---- Interface to upper protocols ---- */
4298
/* Register an upper-protocol callback structure on the global HCI
 * callback list. Always succeeds; returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers take the list lock exclusively */
	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4310
/* Remove a previously registered upper-protocol callback structure from
 * the global HCI callback list. Always succeeds; returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4322
/* Hand one fully formed HCI packet to the transport driver, after
 * timestamping it and mirroring it to the monitor and (when in
 * promiscuous mode) raw HCI sockets.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* The driver takes ownership of the skb regardless of the result */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
4344
3119ae95
JH
4345void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4346{
4347 skb_queue_head_init(&req->cmd_q);
4348 req->hdev = hdev;
5d73e034 4349 req->err = 0;
3119ae95
JH
4350}
4351
/* Submit a built request: splice its queued commands onto the device
 * command queue and kick the command worker.
 *
 * Returns 0 on success, the recorded build error (purging the request),
 * or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command of the request
	 * so it fires once the whole sequence has been processed.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the whole request atomically onto the device queue so the
	 * commands of one request are never interleaved with another's.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4383
/* Allocate an skb containing an HCI command header plus optional
 * parameters. Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	/* GFP_ATOMIC: may be called from non-sleeping contexts */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
4408
/* Send HCI command */
/* Build a stand-alone HCI command and queue it for transmission.
 * Returns 0 on success or -ENOMEM if the command skb could not be
 * allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 4433
/* Queue a command to an asynchronous HCI request */
/* @event selects a specific completion event to wait for (0 means the
 * default Command Complete/Status handling). On allocation failure the
 * error is latched in req->err and reported when the request is run.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4464
/* Queue a command to an asynchronous HCI request, using the default
 * completion event handling (no special event to wait for).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4470
/* Get data from the previously sent command */
/* Returns a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, otherwise NULL.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand out the parameters when the opcode matches */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
4488
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) in front of
 * the current skb payload.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet-boundary/broadcast flags share one
	 * 16-bit field.
	 */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4501
/* Add ACL headers to an outgoing (possibly fragmented) skb and append
 * it to @queue. For a fragmented skb the frag_list members are queued
 * atomically under the queue lock so fragments of one packet are never
 * interleaved with another packet's.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address logical links by channel handle,
	 * BR/EDR by connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4559
/* Queue ACL data on the channel's data queue and schedule the TX work */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4570
/* Send SCO data */
/* Prepend a SCO header and queue the packet on the connection's data
 * queue; actual transmission happens in the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
4591
4592/* ---- HCI TX task (outgoing data) ---- */
4593
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * unacked packets, and compute its fair-share quota of controller
 * buffers in *quote (0 when nothing is schedulable).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares the ACL buffer pool when the controller
			 * reports no dedicated LE buffers.
			 */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4654
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets, since the controller has stopped
 * returning buffer credits for them.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4675
/* Channel-level scheduler: among all channels of connections of @type,
 * pick the one whose head skb has the highest priority, breaking ties
 * by fewest unacked packets on the owning connection. Computes a fair
 * buffer quota in *quote. Returns NULL when nothing is schedulable.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority restarts the fairness race */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among equal-priority channels, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4757
/* Anti-starvation pass: after a scheduling round, promote the head skb
 * of every channel that sent nothing (and clear the sent counter of
 * channels that did), so low-priority traffic eventually gets airtime.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channels that got to send start fresh */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
4807
b71d385a
AE
4808static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4809{
4810 /* Calculate count of blocks used by this packet */
4811 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4812}
4813
/* Detect a stalled ACL link: if no buffer credits are left and nothing
 * was acked since the timeout window, tear down stalled connections.
 * Skipped for unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4824
/* Packet-based ACL scheduler: drain channels in priority order while
 * controller buffer credits (acl_cnt) remain.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4862
/* Block-based ACL scheduler (flow control counted in buffer blocks, as
 * used by AMP controllers): drain channels while block credits remain.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry data over AMP logical links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up if this packet needs more blocks than
			 * are currently available.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4916
/* Dispatch ACL scheduling to the flow-control mode the controller uses */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4939
/* Schedule SCO */
/* Round-robin SCO transmission while SCO buffer credits remain. */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the counter instead of letting it overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4963
/* Round-robin eSCO transmission; eSCO shares the SCO credit pool. */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the counter instead of letting it overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4987
/* LE scheduler: like the ACL packet scheduler but drawing on the LE
 * buffer pool, or the shared ACL pool when the controller reports no
 * dedicated LE buffers (le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5038
/* TX worker: run every per-link-type scheduler, then flush raw packets.
 * Scheduling is bypassed while a user channel owns the device.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5059
25985edc 5060/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5061
/* ACL data packet */
/* Parse an incoming ACL packet, look up its connection by handle and
 * pass the payload to L2CAP; frees the skb if the handle is unknown.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle and packet-boundary flags share one 16-bit field */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; L2CAP consumes the skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5097
/* SCO data packet */
/* Parse an incoming SCO packet, look up its connection by handle and
 * pass the payload to the SCO layer; frees the skb if the handle is
 * unknown.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; the SCO layer consumes the skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5128
9238f36a
JH
5129static bool hci_req_is_complete(struct hci_dev *hdev)
5130{
5131 struct sk_buff *skb;
5132
5133 skb = skb_peek(&hdev->cmd_q);
5134 if (!skb)
5135 return true;
5136
5137 return bt_cb(skb)->req.start;
5138}
5139
/* Re-queue a clone of the last sent command. Used to recover from
 * controllers that emit a spurious reset-complete event; a pending
 * HCI_Reset itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Put the clone at the front so it goes out before anything else */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
5161
/* Handle completion of @opcode with @status for the currently running
 * request: fire the request's completion callback when the request has
 * finished (or failed), and on failure drop the request's remaining
 * queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* A start marker means we reached the next request;
		 * put it back and stop.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5227
/* RX worker: drain the receive queue, mirror packets to the monitor and
 * raw sockets, then dispatch each packet to its type-specific handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* A user channel owns the device: the kernel stack must
		 * not process its traffic.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5282
/* Command worker: send the next queued command if the controller has a
 * free command slot, keeping a clone in hdev->sent_cmd for completion
 * matching and arming the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd clone (free(NULL) is a no-op) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no response is expected until the
			 * reset completes, so don't run the timeout.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
b1efcc28
AG
5314
5315void hci_req_add_le_scan_disable(struct hci_request *req)
5316{
5317 struct hci_cp_le_set_scan_enable cp;
5318
5319 memset(&cp, 0, sizeof(cp));
5320 cp.enable = LE_SCAN_DISABLE;
5321 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5322}
a4790dbd 5323
/* Append the command sequence that starts LE passive scanning (scan
 * parameters followed by scan enable) to @req. Bails out silently if a
 * suitable own address type cannot be determined.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to false since no SCAN_REQ are send
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
5354
a4790dbd
AG
5355static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5356{
5357 if (status)
5358 BT_DBG("HCI request failed to update background scanning: "
5359 "status 0x%2.2x", status);
5360}
5361
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Don't touch scanning while the device is down, still
	 * initializing/setting up, auto-powering off or unregistering.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there is no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}