/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */
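/* The dut_mode attribute below follows the usual pattern for a boolean
 * debugfs toggle: reads report 'Y' or 'N', and writes parse a boolean
 * with strtobool() and then issue HCI_OP_ENABLE_DUT_MODE (or a plain
 * HCI_OP_RESET to leave Device Under Test mode again).
 */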
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
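/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a debugfs
 * attribute from a get/set callback pair and a printf format; passing
 * NULL for the set callback, as for voice_setting below, makes the
 * attribute effectively read-only.
 */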
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
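/* The idle timeout is expressed in milliseconds; a value of 0 disables
 * the timeout, and non-zero values are restricted to the range of
 * 500 msec to 3600000 msec (one hour), matching the checks below.
 */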
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
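/* Sniff intervals are expressed in baseband slots of 0.625 ms. The
 * setters below reject zero or odd values and enforce that the minimum
 * never exceeds the maximum.
 */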
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
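/* LE connection intervals are expressed in units of 1.25 ms. The range
 * checks below correspond to the values allowed by the Bluetooth Core
 * specification: 0x0006 (7.5 ms) up to 0x0c80 (4 seconds).
 */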
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
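/* The advertising channel map is a bitmask of the three LE advertising
 * channels: bit 0 for channel 37, bit 1 for channel 38 and bit 2 for
 * channel 39. At least one channel must be enabled, hence the valid
 * range of 0x01 to 0x07 below.
 */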
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
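/* Take ownership of the last received event from hdev->recv_evt and
 * verify that it carries the expected payload: either the specific
 * event requested by the caller, or the Command Complete event for
 * @opcode. On any mismatch the skb is freed and -ENODATA is returned.
 */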
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
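/* A minimal sketch of the typical caller pattern for the synchronous
 * helpers above (compare dut_mode_write() earlier in this file):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */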
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
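/* Pick the best supported inquiry mode: 0x02 (with extended inquiry
 * result), 0x01 (with RSSI) or 0x00 (standard). The manufacturer and
 * revision checks below appear to whitelist controllers known to
 * handle RSSI inquiry results without advertising the feature.
 */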
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
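/* Run the four init stages in order. Stage 1 applies to every
 * controller type; stages 2-4 are only run for HCI_BREDR (i.e.
 * primary BR/EDR/LE) controllers. The debugfs entries are created
 * once, during the initial HCI_SETUP phase.
 */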
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1f9b9a5d 1900void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1901{
30883512 1902 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1903 struct inquiry_entry *p, *n;
1da177e4 1904
561aafbc
JH
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
b57c1a56 1907 kfree(p);
1da177e4 1908 }
561aafbc
JH
1909
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1912}
1913
a8c5fb1a
GP
1914struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1da177e4 1916{
30883512 1917 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1918 struct inquiry_entry *e;
1919
6ed93dc6 1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1921
561aafbc
JH
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1925 }
1926
1927 return NULL;
1928}
1929
1930struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1931 bdaddr_t *bdaddr)
561aafbc 1932{
30883512 1933 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1934 struct inquiry_entry *e;
1935
6ed93dc6 1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1937
1938 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1939 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1940 return e;
1941 }
1942
1943 return NULL;
1da177e4
LT
1944}
1945
30dc78e1 1946struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1947 bdaddr_t *bdaddr,
1948 int state)
30dc78e1
JH
1949{
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1952
6ed93dc6 1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1954
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963}
1964
a3d4e20a 1965void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1966 struct inquiry_entry *ie)
a3d4e20a
JH
1967{
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1971
1972 list_del(&ie->list);
1973
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1976 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1977 break;
1978 pos = &p->list;
1979 }
1980
1981 list_add(&ie->list, pos);
1982}
1983
af58925c
MH
1984u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1da177e4 1986{
30883512 1987 struct discovery_state *cache = &hdev->discovery;
70f23020 1988 struct inquiry_entry *ie;
af58925c 1989 u32 flags = 0;
1da177e4 1990
6ed93dc6 1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1992
2b2fec4d
SJ
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
af58925c
MH
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1997
70f23020 1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1999 if (ie) {
af58925c
MH
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2002
a3d4e20a 2003 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2004 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2007 }
2008
561aafbc 2009 goto update;
a3d4e20a 2010 }
561aafbc
JH
2011
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2017 }
561aafbc
JH
2018
2019 list_add(&ie->all, &cache->all);
2020
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2026 }
70f23020 2027
561aafbc
JH
2028update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2030 ie->name_state != NAME_PENDING) {
561aafbc
JH
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
1da177e4
LT
2033 }
2034
70f23020
AE
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
1da177e4 2037 cache->timestamp = jiffies;
3175405b
JH
2038
2039 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2041
af58925c
MH
2042done:
2043 return flags;
1da177e4
LT
2044}
2045
2046static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047{
30883512 2048 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2052
561aafbc 2053 list_for_each_entry(e, &cache->all, all) {
1da177e4 2054 struct inquiry_data *data = &e->data;
b57c1a56
JH
2055
2056 if (copied >= num)
2057 break;
2058
1da177e4
LT
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
b57c1a56 2065
1da177e4 2066 info++;
b57c1a56 2067 copied++;
1da177e4
LT
2068 }
2069
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2072}
2073
42c6b129 2074static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2075{
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2077 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2078 struct hci_cp_inquiry cp;
2079
2080 BT_DBG("%s", hdev->name);
2081
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2084
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
42c6b129 2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2090}
2091
3e13fa1e
AG
2092static int wait_inquiry(void *word)
2093{
2094 schedule();
2095 return signal_pending(current);
2096}
2097
1da177e4
LT
2098int hci_inquiry(void __user *arg)
2099{
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2106
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2109
5a08ecce
AE
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
1da177e4
LT
2112 return -ENODEV;
2113
0736cfa8
MH
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2117 }
2118
4a964404 2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
5b69bef5
MH
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2127 }
2128
56f87901
JH
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2132 }
2133
09fd0de5 2134 hci_dev_lock(hdev);
8e87d142 2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2137 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2138 do_inquiry = 1;
2139 }
09fd0de5 2140 hci_dev_unlock(hdev);
1da177e4 2141
04837f64 2142 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2143
2144 if (do_inquiry) {
01178cd4
JH
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
70f23020
AE
2147 if (err < 0)
2148 goto done;
3e13fa1e
AG
2149
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2152 */
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
70f23020 2156 }
1da177e4 2157
8fc9ced3
GP
2158 /* For an unlimited number of responses, use a buffer with
2159 * 255 entries
2160 */
1da177e4
LT
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
2163 /* cache_dump can't sleep. Therefore we allocate a temporary
2164 * buffer and then copy it to user space.
2165 */
01df8c31 2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2167 if (!buf) {
1da177e4
LT
2168 err = -ENOMEM;
2169 goto done;
2170 }
2171
09fd0de5 2172 hci_dev_lock(hdev);
1da177e4 2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2174 hci_dev_unlock(hdev);
1da177e4
LT
2175
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2181 ir.num_rsp))
1da177e4 2182 err = -EFAULT;
8e87d142 2183 } else
1da177e4
LT
2184 err = -EFAULT;
2185
2186 kfree(buf);
2187
2188done:
2189 hci_dev_put(hdev);
2190 return err;
2191}
2192
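/* Editor's illustration, not part of hci_core.c: a minimal userspace
 * sketch of the ioctl path handled by hci_inquiry() above, assuming the
 * BlueZ libbluetooth headers. The buffer layout -- an hci_inquiry_req
 * followed by up to 255 inquiry_info entries -- mirrors the kernel's
 * max_rsp handling; device id 0 (hci0) is an assumption.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
	uint8_t buf[sizeof(struct hci_inquiry_req) +
		    255 * sizeof(struct inquiry_info)];
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
	struct inquiry_info *info;
	int dd, i;

	dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (dd < 0)
		return 1;

	memset(buf, 0, sizeof(buf));
	ir->dev_id  = 0;		/* hci0 (assumption) */
	ir->num_rsp = 255;		/* 0 would also mean "unlimited" (255) */
	ir->length  = 8;		/* inquiry length, units of 1.28 s */
	ir->flags   = IREQ_CACHE_FLUSH;	/* forces hci_inquiry_cache_flush() */
	ir->lap[0]  = 0x33;		/* GIAC 0x9e8b33 */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;

	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0) {
		close(dd);
		return 1;
	}

	info = (struct inquiry_info *) (buf + sizeof(*ir));
	for (i = 0; i < ir->num_rsp; i++) {
		char addr[18];

		ba2str(&info[i].bdaddr, addr);
		printf("%s\n", addr);
	}

	close(dd);
	return 0;
}
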
cbed0ca1 2193static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2194{
1da177e4
LT
2195 int ret = 0;
2196
1da177e4
LT
2197 BT_DBG("%s %p", hdev->name, hdev);
2198
2199 hci_req_lock(hdev);
2200
94324962
JH
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2204 }
2205
a5c8f270
MH
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2209 */
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2213 }
2214
2215 /* Check for valid public address or a configured static
2216 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2219 *
c6beca0e
MH
2220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2222 * available.
2223 *
a5c8f270
MH
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2226 */
c6beca0e
MH
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2233 }
611b30f7
MH
2234 }
2235
1da177e4
LT
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2239 }
2240
1da177e4
LT
2241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2244 }
2245
f41c70c4
MH
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2248
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250 ret = hdev->setup(hdev);
2251
24c457e2
MH
2252 /* If public address change is configured, ensure that the
2253 * address gets programmed. If the driver does not support
2254 * changing the public address, fail the power on procedure.
2255 */
2256 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257 if (hdev->set_bdaddr)
2258 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259 else
2260 ret = -EADDRNOTAVAIL;
2261 }
2262
f41c70c4 2263 if (!ret) {
4a964404 2264 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2266 ret = __hci_init(hdev);
1da177e4
LT
2267 }
2268
f41c70c4
MH
2269 clear_bit(HCI_INIT, &hdev->flags);
2270
1da177e4
LT
2271 if (!ret) {
2272 hci_dev_hold(hdev);
d6bfd59c 2273 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2274 set_bit(HCI_UP, &hdev->flags);
2275 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2276 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
4a964404 2277 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2278 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2279 hdev->dev_type == HCI_BREDR) {
09fd0de5 2280 hci_dev_lock(hdev);
744cf19e 2281 mgmt_powered(hdev, 1);
09fd0de5 2282 hci_dev_unlock(hdev);
56e5cb86 2283 }
8e87d142 2284 } else {
1da177e4 2285 /* Init failed, cleanup */
3eff45ea 2286 flush_work(&hdev->tx_work);
c347b765 2287 flush_work(&hdev->cmd_work);
b78752cc 2288 flush_work(&hdev->rx_work);
1da177e4
LT
2289
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->rx_q);
2292
2293 if (hdev->flush)
2294 hdev->flush(hdev);
2295
2296 if (hdev->sent_cmd) {
2297 kfree_skb(hdev->sent_cmd);
2298 hdev->sent_cmd = NULL;
2299 }
2300
2301 hdev->close(hdev);
fee746b0 2302 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2303 }
2304
2305done:
2306 hci_req_unlock(hdev);
1da177e4
LT
2307 return ret;
2308}
2309
cbed0ca1
JH
2310/* ---- HCI ioctl helpers ---- */
2311
2312int hci_dev_open(__u16 dev)
2313{
2314 struct hci_dev *hdev;
2315 int err;
2316
2317 hdev = hci_dev_get(dev);
2318 if (!hdev)
2319 return -ENODEV;
2320
4a964404 2321 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2322 * up as user channel. Trying to bring them up as normal devices
2323 * will result in a failure. Only user channel operation is
2324 * possible.
2325 *
2326 * When this function is called for a user channel, the flag
2327 * HCI_USER_CHANNEL will be set first before attempting to
2328 * open the device.
2329 */
4a964404 2330 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2331 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2332 err = -EOPNOTSUPP;
2333 goto done;
2334 }
2335
e1d08f40
JH
2336 /* We need to ensure that no other power on/off work is pending
2337 * before proceeding to call hci_dev_do_open. This is
2338 * particularly important if the setup procedure has not yet
2339 * completed.
2340 */
2341 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2342 cancel_delayed_work(&hdev->power_off);
2343
a5c8f270
MH
2344 /* After this call it is guaranteed that the setup procedure
2345 * has finished. This means that error conditions like RFKILL
2346 * or no valid public or static random address apply.
2347 */
e1d08f40
JH
2348 flush_workqueue(hdev->req_workqueue);
2349
cbed0ca1
JH
2350 err = hci_dev_do_open(hdev);
2351
fee746b0 2352done:
cbed0ca1 2353 hci_dev_put(hdev);
cbed0ca1
JH
2354 return err;
2355}
2356
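/* Editor's illustration, not part of hci_core.c: bringing an adapter up
 * from userspace, which ends up in hci_dev_open() above. Sketch assuming
 * the BlueZ headers; needs CAP_NET_ADMIN.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int adapter_up(int dev_id)
{
	int err = 0;
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -errno;

	/* EALREADY corresponds to the HCI_UP check in hci_dev_do_open() */
	if (ioctl(ctl, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		err = -errno;

	close(ctl);
	return err;
}
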
1da177e4
LT
2357static int hci_dev_do_close(struct hci_dev *hdev)
2358{
2359 BT_DBG("%s %p", hdev->name, hdev);
2360
78c04c0b
VCG
2361 cancel_delayed_work(&hdev->power_off);
2362
1da177e4
LT
2363 hci_req_cancel(hdev, ENODEV);
2364 hci_req_lock(hdev);
2365
2366 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2367 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2368 hci_req_unlock(hdev);
2369 return 0;
2370 }
2371
3eff45ea
GP
2372 /* Flush RX and TX works */
2373 flush_work(&hdev->tx_work);
b78752cc 2374 flush_work(&hdev->rx_work);
1da177e4 2375
16ab91ab 2376 if (hdev->discov_timeout > 0) {
e0f9309f 2377 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2378 hdev->discov_timeout = 0;
5e5282bb 2379 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2380 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2381 }
2382
a8b2d5c2 2383 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2384 cancel_delayed_work(&hdev->service_cache);
2385
7ba8b4be 2386 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2387
2388 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2389 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2390
09fd0de5 2391 hci_dev_lock(hdev);
1f9b9a5d 2392 hci_inquiry_cache_flush(hdev);
1da177e4 2393 hci_conn_hash_flush(hdev);
6046dc3e 2394 hci_pend_le_conns_clear(hdev);
09fd0de5 2395 hci_dev_unlock(hdev);
1da177e4
LT
2396
2397 hci_notify(hdev, HCI_DEV_DOWN);
2398
2399 if (hdev->flush)
2400 hdev->flush(hdev);
2401
2402 /* Reset device */
2403 skb_queue_purge(&hdev->cmd_q);
2404 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2405 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2406 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2407 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2408 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2409 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2410 clear_bit(HCI_INIT, &hdev->flags);
2411 }
2412
c347b765
GP
2413 /* flush cmd work */
2414 flush_work(&hdev->cmd_work);
1da177e4
LT
2415
2416 /* Drop queues */
2417 skb_queue_purge(&hdev->rx_q);
2418 skb_queue_purge(&hdev->cmd_q);
2419 skb_queue_purge(&hdev->raw_q);
2420
2421 /* Drop last sent command */
2422 if (hdev->sent_cmd) {
65cc2b49 2423 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2424 kfree_skb(hdev->sent_cmd);
2425 hdev->sent_cmd = NULL;
2426 }
2427
b6ddb638
JH
2428 kfree_skb(hdev->recv_evt);
2429 hdev->recv_evt = NULL;
2430
1da177e4
LT
2431 /* After this point our queues are empty
2432 * and no tasks are scheduled. */
2433 hdev->close(hdev);
2434
35b973c9 2435 /* Clear flags */
fee746b0 2436 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2437 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2438
93c311a0
MH
2439 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2440 if (hdev->dev_type == HCI_BREDR) {
2441 hci_dev_lock(hdev);
2442 mgmt_powered(hdev, 0);
2443 hci_dev_unlock(hdev);
2444 }
8ee56540 2445 }
5add6af8 2446
ced5c338 2447 /* Controller radio is available but is currently powered down */
536619e8 2448 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2449
e59fda8d 2450 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2451 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2452 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2453
1da177e4
LT
2454 hci_req_unlock(hdev);
2455
2456 hci_dev_put(hdev);
2457 return 0;
2458}
2459
2460int hci_dev_close(__u16 dev)
2461{
2462 struct hci_dev *hdev;
2463 int err;
2464
70f23020
AE
2465 hdev = hci_dev_get(dev);
2466 if (!hdev)
1da177e4 2467 return -ENODEV;
8ee56540 2468
0736cfa8
MH
2469 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2470 err = -EBUSY;
2471 goto done;
2472 }
2473
8ee56540
MH
2474 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2475 cancel_delayed_work(&hdev->power_off);
2476
1da177e4 2477 err = hci_dev_do_close(hdev);
8ee56540 2478
0736cfa8 2479done:
1da177e4
LT
2480 hci_dev_put(hdev);
2481 return err;
2482}
2483
2484int hci_dev_reset(__u16 dev)
2485{
2486 struct hci_dev *hdev;
2487 int ret = 0;
2488
70f23020
AE
2489 hdev = hci_dev_get(dev);
2490 if (!hdev)
1da177e4
LT
2491 return -ENODEV;
2492
2493 hci_req_lock(hdev);
1da177e4 2494
808a049e
MH
2495 if (!test_bit(HCI_UP, &hdev->flags)) {
2496 ret = -ENETDOWN;
1da177e4 2497 goto done;
808a049e 2498 }
1da177e4 2499
0736cfa8
MH
2500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501 ret = -EBUSY;
2502 goto done;
2503 }
2504
4a964404 2505 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2506 ret = -EOPNOTSUPP;
2507 goto done;
2508 }
2509
1da177e4
LT
2510 /* Drop queues */
2511 skb_queue_purge(&hdev->rx_q);
2512 skb_queue_purge(&hdev->cmd_q);
2513
09fd0de5 2514 hci_dev_lock(hdev);
1f9b9a5d 2515 hci_inquiry_cache_flush(hdev);
1da177e4 2516 hci_conn_hash_flush(hdev);
09fd0de5 2517 hci_dev_unlock(hdev);
1da177e4
LT
2518
2519 if (hdev->flush)
2520 hdev->flush(hdev);
2521
8e87d142 2522 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2523 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2524
fee746b0 2525 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2526
2527done:
1da177e4
LT
2528 hci_req_unlock(hdev);
2529 hci_dev_put(hdev);
2530 return ret;
2531}
2532
2533int hci_dev_reset_stat(__u16 dev)
2534{
2535 struct hci_dev *hdev;
2536 int ret = 0;
2537
70f23020
AE
2538 hdev = hci_dev_get(dev);
2539 if (!hdev)
1da177e4
LT
2540 return -ENODEV;
2541
0736cfa8
MH
2542 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2543 ret = -EBUSY;
2544 goto done;
2545 }
2546
4a964404 2547 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2548 ret = -EOPNOTSUPP;
2549 goto done;
2550 }
2551
1da177e4
LT
2552 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
0736cfa8 2554done:
1da177e4 2555 hci_dev_put(hdev);
1da177e4
LT
2556 return ret;
2557}
2558
2559int hci_dev_cmd(unsigned int cmd, void __user *arg)
2560{
2561 struct hci_dev *hdev;
2562 struct hci_dev_req dr;
2563 int err = 0;
2564
2565 if (copy_from_user(&dr, arg, sizeof(dr)))
2566 return -EFAULT;
2567
70f23020
AE
2568 hdev = hci_dev_get(dr.dev_id);
2569 if (!hdev)
1da177e4
LT
2570 return -ENODEV;
2571
0736cfa8
MH
2572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 err = -EBUSY;
2574 goto done;
2575 }
2576
4a964404 2577 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2578 err = -EOPNOTSUPP;
2579 goto done;
2580 }
2581
5b69bef5
MH
2582 if (hdev->dev_type != HCI_BREDR) {
2583 err = -EOPNOTSUPP;
2584 goto done;
2585 }
2586
56f87901
JH
2587 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2588 err = -EOPNOTSUPP;
2589 goto done;
2590 }
2591
1da177e4
LT
2592 switch (cmd) {
2593 case HCISETAUTH:
01178cd4
JH
2594 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
1da177e4
LT
2596 break;
2597
2598 case HCISETENCRYPT:
2599 if (!lmp_encrypt_capable(hdev)) {
2600 err = -EOPNOTSUPP;
2601 break;
2602 }
2603
2604 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2605 /* Auth must be enabled first */
01178cd4
JH
2606 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2607 HCI_INIT_TIMEOUT);
1da177e4
LT
2608 if (err)
2609 break;
2610 }
2611
01178cd4
JH
2612 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2613 HCI_INIT_TIMEOUT);
1da177e4
LT
2614 break;
2615
2616 case HCISETSCAN:
01178cd4
JH
2617 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2618 HCI_INIT_TIMEOUT);
1da177e4
LT
2619 break;
2620
1da177e4 2621 case HCISETLINKPOL:
01178cd4
JH
2622 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2623 HCI_INIT_TIMEOUT);
1da177e4
LT
2624 break;
2625
2626 case HCISETLINKMODE:
e4e8e37c
MH
2627 hdev->link_mode = ((__u16) dr.dev_opt) &
2628 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2629 break;
2630
2631 case HCISETPTYPE:
2632 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2633 break;
2634
2635 case HCISETACLMTU:
e4e8e37c
MH
2636 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2637 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2638 break;
2639
2640 case HCISETSCOMTU:
e4e8e37c
MH
2641 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2642 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2643 break;
2644
2645 default:
2646 err = -EINVAL;
2647 break;
2648 }
e4e8e37c 2649
0736cfa8 2650done:
1da177e4
LT
2651 hci_dev_put(hdev);
2652 return err;
2653}
2654
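/* Editor's illustration, not part of hci_core.c: the HCISETSCAN case
 * above driven from userspace, making an adapter connectable and
 * discoverable. Sketch assuming the BlueZ headers; ctl is an open raw
 * HCI socket.
 */
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int enable_scans(int ctl, int dev_id)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	/* page + inquiry scan */

	/* Lands in hci_scan_req(), i.e. HCI_OP_WRITE_SCAN_ENABLE */
	return ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
}
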
2655int hci_get_dev_list(void __user *arg)
2656{
8035ded4 2657 struct hci_dev *hdev;
1da177e4
LT
2658 struct hci_dev_list_req *dl;
2659 struct hci_dev_req *dr;
1da177e4
LT
2660 int n = 0, size, err;
2661 __u16 dev_num;
2662
2663 if (get_user(dev_num, (__u16 __user *) arg))
2664 return -EFAULT;
2665
2666 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2667 return -EINVAL;
2668
2669 size = sizeof(*dl) + dev_num * sizeof(*dr);
2670
70f23020
AE
2671 dl = kzalloc(size, GFP_KERNEL);
2672 if (!dl)
1da177e4
LT
2673 return -ENOMEM;
2674
2675 dr = dl->dev_req;
2676
f20d09d5 2677 read_lock(&hci_dev_list_lock);
8035ded4 2678 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2679 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2680 cancel_delayed_work(&hdev->power_off);
c542a06c 2681
a8b2d5c2
JH
2682 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2683 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2684
1da177e4
LT
2685 (dr + n)->dev_id = hdev->id;
2686 (dr + n)->dev_opt = hdev->flags;
c542a06c 2687
1da177e4
LT
2688 if (++n >= dev_num)
2689 break;
2690 }
f20d09d5 2691 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2692
2693 dl->dev_num = n;
2694 size = sizeof(*dl) + n * sizeof(*dr);
2695
2696 err = copy_to_user(arg, dl, size);
2697 kfree(dl);
2698
2699 return err ? -EFAULT : 0;
2700}
2701
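/* Editor's illustration, not part of hci_core.c: the userspace
 * counterpart of hci_get_dev_list() above. Sketch assuming the BlueZ
 * headers; HCI_MAX_DEV bounds the request the same way dev_num bounds
 * it in the kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int list_adapters(int ctl)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl)
		return -1;

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) < 0) {
		free(dl);
		return -1;
	}

	for (i = 0; i < dl->dev_num; i++)
		printf("hci%u flags 0x%x\n", dr[i].dev_id, dr[i].dev_opt);

	free(dl);
	return 0;
}
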
2702int hci_get_dev_info(void __user *arg)
2703{
2704 struct hci_dev *hdev;
2705 struct hci_dev_info di;
2706 int err = 0;
2707
2708 if (copy_from_user(&di, arg, sizeof(di)))
2709 return -EFAULT;
2710
70f23020
AE
2711 hdev = hci_dev_get(di.dev_id);
2712 if (!hdev)
1da177e4
LT
2713 return -ENODEV;
2714
a8b2d5c2 2715 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2716 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2717
a8b2d5c2
JH
2718 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2719 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2720
1da177e4
LT
2721 strcpy(di.name, hdev->name);
2722 di.bdaddr = hdev->bdaddr;
60f2a3ed 2723 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2724 di.flags = hdev->flags;
2725 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2726 if (lmp_bredr_capable(hdev)) {
2727 di.acl_mtu = hdev->acl_mtu;
2728 di.acl_pkts = hdev->acl_pkts;
2729 di.sco_mtu = hdev->sco_mtu;
2730 di.sco_pkts = hdev->sco_pkts;
2731 } else {
2732 di.acl_mtu = hdev->le_mtu;
2733 di.acl_pkts = hdev->le_pkts;
2734 di.sco_mtu = 0;
2735 di.sco_pkts = 0;
2736 }
1da177e4
LT
2737 di.link_policy = hdev->link_policy;
2738 di.link_mode = hdev->link_mode;
2739
2740 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2741 memcpy(&di.features, &hdev->features, sizeof(di.features));
2742
2743 if (copy_to_user(arg, &di, sizeof(di)))
2744 err = -EFAULT;
2745
2746 hci_dev_put(hdev);
2747
2748 return err;
2749}
2750
2751/* ---- Interface to HCI drivers ---- */
2752
611b30f7
MH
2753static int hci_rfkill_set_block(void *data, bool blocked)
2754{
2755 struct hci_dev *hdev = data;
2756
2757 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2758
0736cfa8
MH
2759 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2760 return -EBUSY;
2761
5e130367
JH
2762 if (blocked) {
2763 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2764 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2765 hci_dev_do_close(hdev);
5e130367
JH
2766 } else {
2767 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2768 }
611b30f7
MH
2769
2770 return 0;
2771}
2772
2773static const struct rfkill_ops hci_rfkill_ops = {
2774 .set_block = hci_rfkill_set_block,
2775};
2776
ab81cbf9
JH
2777static void hci_power_on(struct work_struct *work)
2778{
2779 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2780 int err;
ab81cbf9
JH
2781
2782 BT_DBG("%s", hdev->name);
2783
cbed0ca1 2784 err = hci_dev_do_open(hdev);
96570ffc
JH
2785 if (err < 0) {
2786 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2787 return;
96570ffc 2788 }
ab81cbf9 2789
a5c8f270
MH
2790 /* During the HCI setup phase, a few error conditions are
2791 * ignored and they need to be checked now. If they are still
2792 * valid, it is important to turn the device back off.
2793 */
2794 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2795 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2796 (hdev->dev_type == HCI_BREDR &&
2797 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2798 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2799 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2800 hci_dev_do_close(hdev);
2801 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2802 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2803 HCI_AUTO_OFF_TIMEOUT);
bf543036 2804 }
ab81cbf9 2805
fee746b0 2806 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2807 /* For unconfigured devices, set the HCI_RAW flag
2808 * so that userspace can easily identify them.
4a964404
MH
2809 */
2810 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2811 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2812
2813 /* For fully configured devices, this will send
2814 * the Index Added event. For unconfigured devices,
2815 * it will send the Unconfigured Index Added event.
2816 *
2817 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2818 * and no event will be sent.
2819 */
2820 mgmt_index_added(hdev);
fee746b0 2821 }
ab81cbf9
JH
2822}
2823
2824static void hci_power_off(struct work_struct *work)
2825{
3243553f 2826 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2827 power_off.work);
ab81cbf9
JH
2828
2829 BT_DBG("%s", hdev->name);
2830
8ee56540 2831 hci_dev_do_close(hdev);
ab81cbf9
JH
2832}
2833
16ab91ab
JH
2834static void hci_discov_off(struct work_struct *work)
2835{
2836 struct hci_dev *hdev;
16ab91ab
JH
2837
2838 hdev = container_of(work, struct hci_dev, discov_off.work);
2839
2840 BT_DBG("%s", hdev->name);
2841
d1967ff8 2842 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2843}
2844
35f7498a 2845void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2846{
4821002c 2847 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2848
4821002c
JH
2849 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2850 list_del(&uuid->list);
2aeb9a1a
JH
2851 kfree(uuid);
2852 }
2aeb9a1a
JH
2853}
2854
35f7498a 2855void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2856{
2857 struct list_head *p, *n;
2858
2859 list_for_each_safe(p, n, &hdev->link_keys) {
2860 struct link_key *key;
2861
2862 key = list_entry(p, struct link_key, list);
2863
2864 list_del(p);
2865 kfree(key);
2866 }
55ed8ca1
JH
2867}
2868
35f7498a 2869void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2870{
2871 struct smp_ltk *k, *tmp;
2872
2873 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2874 list_del(&k->list);
2875 kfree(k);
2876 }
b899efaf
VCG
2877}
2878
970c4e46
JH
2879void hci_smp_irks_clear(struct hci_dev *hdev)
2880{
2881 struct smp_irk *k, *tmp;
2882
2883 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2884 list_del(&k->list);
2885 kfree(k);
2886 }
2887}
2888
55ed8ca1
JH
2889struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2890{
8035ded4 2891 struct link_key *k;
55ed8ca1 2892
8035ded4 2893 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2894 if (bacmp(bdaddr, &k->bdaddr) == 0)
2895 return k;
55ed8ca1
JH
2896
2897 return NULL;
2898}
2899
745c0ce3 2900static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2901 u8 key_type, u8 old_key_type)
d25e28ab
JH
2902{
2903 /* Legacy key */
2904 if (key_type < 0x03)
745c0ce3 2905 return true;
d25e28ab
JH
2906
2907 /* Debug keys are insecure so don't store them persistently */
2908 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2909 return false;
d25e28ab
JH
2910
2911 /* Changed combination key and there's no previous one */
2912 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2913 return false;
d25e28ab
JH
2914
2915 /* Security mode 3 case */
2916 if (!conn)
745c0ce3 2917 return true;
d25e28ab
JH
2918
2919 /* Neither the local nor the remote side requested no-bonding */
2920 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2921 return true;
d25e28ab
JH
2922
2923 /* Local side had dedicated bonding as requirement */
2924 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2925 return true;
d25e28ab
JH
2926
2927 /* Remote side had dedicated bonding as requirement */
2928 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2929 return true;
d25e28ab
JH
2930
2931 /* If none of the above criteria match, then don't store the key
2932 * persistently */
745c0ce3 2933 return false;
d25e28ab
JH
2934}
2935
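/* Editor's illustration, not part of hci_core.c: the decision table of
 * hci_persistent_key() restated as a standalone predicate with spot
 * checks. The numeric constants are the Bluetooth link key types and
 * auth requirement codes; the security mode 3 (no hci_conn) early
 * return is omitted for brevity.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool persistent_key(uint8_t key_type, uint8_t old_key_type,
			   uint8_t local_auth, uint8_t remote_auth)
{
	if (key_type < 0x03)		/* legacy key: always stored */
		return true;
	if (key_type == 0x03)		/* debug combination: never stored */
		return false;
	if (key_type == 0x06 && old_key_type == 0xff)
		return false;		/* changed key with no previous one */
	if (local_auth > 0x01 && remote_auth > 0x01)
		return true;		/* neither side was no-bonding */
	if (local_auth == 0x02 || local_auth == 0x03)
		return true;		/* local dedicated bonding */
	if (remote_auth == 0x02 || remote_auth == 0x03)
		return true;		/* remote dedicated bonding */
	return false;
}

int main(void)
{
	assert(persistent_key(0x00, 0xff, 0x00, 0x00));  /* legacy: keep */
	assert(!persistent_key(0x03, 0x00, 0x05, 0x05)); /* debug: drop */
	assert(!persistent_key(0x04, 0x00, 0x01, 0x01)); /* no-bonding both */
	assert(persistent_key(0x04, 0x00, 0x04, 0x04));  /* general bonding */
	return 0;
}
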
98a0b845
JH
2936static bool ltk_type_master(u8 type)
2937{
d97c9fb0 2938 return (type == SMP_LTK);
98a0b845
JH
2939}
2940
fe39c7b2 2941struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2942 bool master)
75d262c2 2943{
c9839a11 2944 struct smp_ltk *k;
75d262c2 2945
c9839a11 2946 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2947 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2948 continue;
2949
98a0b845
JH
2950 if (ltk_type_master(k->type) != master)
2951 continue;
2952
c9839a11 2953 return k;
75d262c2
VCG
2954 }
2955
2956 return NULL;
2957}
75d262c2 2958
c9839a11 2959struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2960 u8 addr_type, bool master)
75d262c2 2961{
c9839a11 2962 struct smp_ltk *k;
75d262c2 2963
c9839a11
VCG
2964 list_for_each_entry(k, &hdev->long_term_keys, list)
2965 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2966 bacmp(bdaddr, &k->bdaddr) == 0 &&
2967 ltk_type_master(k->type) == master)
75d262c2
VCG
2968 return k;
2969
2970 return NULL;
2971}
75d262c2 2972
970c4e46
JH
2973struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2974{
2975 struct smp_irk *irk;
2976
2977 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2978 if (!bacmp(&irk->rpa, rpa))
2979 return irk;
2980 }
2981
2982 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2983 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2984 bacpy(&irk->rpa, rpa);
2985 return irk;
2986 }
2987 }
2988
2989 return NULL;
2990}
2991
2992struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2993 u8 addr_type)
2994{
2995 struct smp_irk *irk;
2996
6cfc9988
JH
2997 /* Identity Address must be public or static random */
2998 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2999 return NULL;
3000
970c4e46
JH
3001 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3002 if (addr_type == irk->addr_type &&
3003 bacmp(bdaddr, &irk->bdaddr) == 0)
3004 return irk;
3005 }
3006
3007 return NULL;
3008}
3009
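/* Editor's illustration, not part of hci_core.c: the identity address
 * rule enforced above. A static random address carries 0b11 in the two
 * most significant bits of the top octet; bdaddr_t is stored
 * little-endian, hence b[5]. The type values mirror
 * ADDR_LE_DEV_PUBLIC/ADDR_LE_DEV_RANDOM (assumed 0x00/0x01).
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_identity_address(const uint8_t b[6], uint8_t addr_type)
{
	if (addr_type == 0x00)			/* public address */
		return true;

	return (b[5] & 0xc0) == 0xc0;		/* static random only */
}

int main(void)
{
	uint8_t stat_rnd[6] = { 0, 0, 0, 0, 0, 0xc1 };	/* static random */
	uint8_t rpa[6]      = { 0, 0, 0, 0, 0, 0x41 };	/* resolvable private */

	assert(is_identity_address(stat_rnd, 0x01));
	assert(!is_identity_address(rpa, 0x01));
	return 0;
}
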
567fa2aa 3010struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3011 bdaddr_t *bdaddr, u8 *val, u8 type,
3012 u8 pin_len, bool *persistent)
55ed8ca1
JH
3013{
3014 struct link_key *key, *old_key;
745c0ce3 3015 u8 old_key_type;
55ed8ca1
JH
3016
3017 old_key = hci_find_link_key(hdev, bdaddr);
3018 if (old_key) {
3019 old_key_type = old_key->type;
3020 key = old_key;
3021 } else {
12adcf3a 3022 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3023 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3024 if (!key)
567fa2aa 3025 return NULL;
55ed8ca1
JH
3026 list_add(&key->list, &hdev->link_keys);
3027 }
3028
6ed93dc6 3029 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3030
d25e28ab
JH
3031 /* Some buggy controller combinations generate a changed
3032 * combination key for legacy pairing even when there's no
3033 * previous key */
3034 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3035 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3036 type = HCI_LK_COMBINATION;
655fe6ec
JH
3037 if (conn)
3038 conn->key_type = type;
3039 }
d25e28ab 3040
55ed8ca1 3041 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3042 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3043 key->pin_len = pin_len;
3044
b6020ba0 3045 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3046 key->type = old_key_type;
4748fed2
JH
3047 else
3048 key->type = type;
3049
7652ff6a
JH
3050 if (persistent)
3051 *persistent = hci_persistent_key(hdev, conn, type,
3052 old_key_type);
55ed8ca1 3053
567fa2aa 3054 return key;
55ed8ca1
JH
3055}
3056
ca9142b8 3057struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3058 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3059 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3060{
c9839a11 3061 struct smp_ltk *key, *old_key;
98a0b845 3062 bool master = ltk_type_master(type);
75d262c2 3063
98a0b845 3064 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3065 if (old_key)
75d262c2 3066 key = old_key;
c9839a11 3067 else {
0a14ab41 3068 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3069 if (!key)
ca9142b8 3070 return NULL;
c9839a11 3071 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3072 }
3073
75d262c2 3074 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3075 key->bdaddr_type = addr_type;
3076 memcpy(key->val, tk, sizeof(key->val));
3077 key->authenticated = authenticated;
3078 key->ediv = ediv;
fe39c7b2 3079 key->rand = rand;
c9839a11
VCG
3080 key->enc_size = enc_size;
3081 key->type = type;
75d262c2 3082
ca9142b8 3083 return key;
75d262c2
VCG
3084}
3085
ca9142b8
JH
3086struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3087 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3088{
3089 struct smp_irk *irk;
3090
3091 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3092 if (!irk) {
3093 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3094 if (!irk)
ca9142b8 3095 return NULL;
970c4e46
JH
3096
3097 bacpy(&irk->bdaddr, bdaddr);
3098 irk->addr_type = addr_type;
3099
3100 list_add(&irk->list, &hdev->identity_resolving_keys);
3101 }
3102
3103 memcpy(irk->val, val, 16);
3104 bacpy(&irk->rpa, rpa);
3105
ca9142b8 3106 return irk;
970c4e46
JH
3107}
3108
55ed8ca1
JH
3109int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3110{
3111 struct link_key *key;
3112
3113 key = hci_find_link_key(hdev, bdaddr);
3114 if (!key)
3115 return -ENOENT;
3116
6ed93dc6 3117 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3118
3119 list_del(&key->list);
3120 kfree(key);
3121
3122 return 0;
3123}
3124
e0b2b27e 3125int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3126{
3127 struct smp_ltk *k, *tmp;
c51ffa0b 3128 int removed = 0;
b899efaf
VCG
3129
3130 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3131 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3132 continue;
3133
6ed93dc6 3134 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3135
3136 list_del(&k->list);
3137 kfree(k);
c51ffa0b 3138 removed++;
b899efaf
VCG
3139 }
3140
c51ffa0b 3141 return removed ? 0 : -ENOENT;
b899efaf
VCG
3142}
3143
a7ec7338
JH
3144void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3145{
3146 struct smp_irk *k, *tmp;
3147
668b7b19 3148 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3149 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3150 continue;
3151
3152 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3153
3154 list_del(&k->list);
3155 kfree(k);
3156 }
3157}
3158
6bd32326 3159/* HCI command timer function */
65cc2b49 3160static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3161{
65cc2b49
MH
3162 struct hci_dev *hdev = container_of(work, struct hci_dev,
3163 cmd_timer.work);
6bd32326 3164
bda4f23a
AE
3165 if (hdev->sent_cmd) {
3166 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3167 u16 opcode = __le16_to_cpu(sent->opcode);
3168
3169 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3170 } else {
3171 BT_ERR("%s command tx timeout", hdev->name);
3172 }
3173
6bd32326 3174 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3175 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3176}
3177
2763eda6 3178struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3179 bdaddr_t *bdaddr)
2763eda6
SJ
3180{
3181 struct oob_data *data;
3182
3183 list_for_each_entry(data, &hdev->remote_oob_data, list)
3184 if (bacmp(bdaddr, &data->bdaddr) == 0)
3185 return data;
3186
3187 return NULL;
3188}
3189
3190int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3191{
3192 struct oob_data *data;
3193
3194 data = hci_find_remote_oob_data(hdev, bdaddr);
3195 if (!data)
3196 return -ENOENT;
3197
6ed93dc6 3198 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3199
3200 list_del(&data->list);
3201 kfree(data);
3202
3203 return 0;
3204}
3205
35f7498a 3206void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3207{
3208 struct oob_data *data, *n;
3209
3210 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3211 list_del(&data->list);
3212 kfree(data);
3213 }
2763eda6
SJ
3214}
3215
0798872e
MH
3216int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3217 u8 *hash, u8 *randomizer)
2763eda6
SJ
3218{
3219 struct oob_data *data;
3220
3221 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3222 if (!data) {
0a14ab41 3223 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3224 if (!data)
3225 return -ENOMEM;
3226
3227 bacpy(&data->bdaddr, bdaddr);
3228 list_add(&data->list, &hdev->remote_oob_data);
3229 }
3230
519ca9d0
MH
3231 memcpy(data->hash192, hash, sizeof(data->hash192));
3232 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3233
0798872e
MH
3234 memset(data->hash256, 0, sizeof(data->hash256));
3235 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3236
3237 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3238
3239 return 0;
3240}
3241
3242int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3243 u8 *hash192, u8 *randomizer192,
3244 u8 *hash256, u8 *randomizer256)
3245{
3246 struct oob_data *data;
3247
3248 data = hci_find_remote_oob_data(hdev, bdaddr);
3249 if (!data) {
0a14ab41 3250 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3251 if (!data)
3252 return -ENOMEM;
3253
3254 bacpy(&data->bdaddr, bdaddr);
3255 list_add(&data->list, &hdev->remote_oob_data);
3256 }
3257
3258 memcpy(data->hash192, hash192, sizeof(data->hash192));
3259 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3260
3261 memcpy(data->hash256, hash256, sizeof(data->hash256));
3262 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3263
6ed93dc6 3264 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3265
3266 return 0;
3267}
3268
b9ee0a78
MH
3269struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3270 bdaddr_t *bdaddr, u8 type)
b2a66aad 3271{
8035ded4 3272 struct bdaddr_list *b;
b2a66aad 3273
b9ee0a78
MH
3274 list_for_each_entry(b, &hdev->blacklist, list) {
3275 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3276 return b;
b9ee0a78 3277 }
b2a66aad
AJ
3278
3279 return NULL;
3280}
3281
c9507490 3282static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3283{
3284 struct list_head *p, *n;
3285
3286 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3287 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3288
3289 list_del(p);
3290 kfree(b);
3291 }
b2a66aad
AJ
3292}
3293
88c1fe4b 3294int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3295{
3296 struct bdaddr_list *entry;
b2a66aad 3297
b9ee0a78 3298 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3299 return -EBADF;
3300
b9ee0a78 3301 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3302 return -EEXIST;
b2a66aad
AJ
3303
3304 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3305 if (!entry)
3306 return -ENOMEM;
b2a66aad
AJ
3307
3308 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3309 entry->bdaddr_type = type;
b2a66aad
AJ
3310
3311 list_add(&entry->list, &hdev->blacklist);
3312
2a8357f2 3313 return 0;
b2a66aad
AJ
3314}
3315
88c1fe4b 3316int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3317{
3318 struct bdaddr_list *entry;
b2a66aad 3319
35f7498a
JH
3320 if (!bacmp(bdaddr, BDADDR_ANY)) {
3321 hci_blacklist_clear(hdev);
3322 return 0;
3323 }
b2a66aad 3324
b9ee0a78 3325 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3326 if (!entry)
5e762444 3327 return -ENOENT;
b2a66aad
AJ
3328
3329 list_del(&entry->list);
3330 kfree(entry);
3331
2a8357f2 3332 return 0;
b2a66aad
AJ
3333}
3334
d2ab0ac1
MH
3335struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3336 bdaddr_t *bdaddr, u8 type)
3337{
3338 struct bdaddr_list *b;
3339
3340 list_for_each_entry(b, &hdev->le_white_list, list) {
3341 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3342 return b;
3343 }
3344
3345 return NULL;
3346}
3347
3348void hci_white_list_clear(struct hci_dev *hdev)
3349{
3350 struct list_head *p, *n;
3351
3352 list_for_each_safe(p, n, &hdev->le_white_list) {
3353 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3354
3355 list_del(p);
3356 kfree(b);
3357 }
3358}
3359
3360int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3361{
3362 struct bdaddr_list *entry;
3363
3364 if (!bacmp(bdaddr, BDADDR_ANY))
3365 return -EBADF;
3366
3367 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3368 if (!entry)
3369 return -ENOMEM;
3370
3371 bacpy(&entry->bdaddr, bdaddr);
3372 entry->bdaddr_type = type;
3373
3374 list_add(&entry->list, &hdev->le_white_list);
3375
3376 return 0;
3377}
3378
3379int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3380{
3381 struct bdaddr_list *entry;
3382
3383 if (!bacmp(bdaddr, BDADDR_ANY))
3384 return -EBADF;
3385
3386 entry = hci_white_list_lookup(hdev, bdaddr, type);
3387 if (!entry)
3388 return -ENOENT;
3389
3390 list_del(&entry->list);
3391 kfree(entry);
3392
3393 return 0;
3394}
3395
15819a70
AG
3396/* This function requires the caller holds hdev->lock */
3397struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3398 bdaddr_t *addr, u8 addr_type)
3399{
3400 struct hci_conn_params *params;
3401
738f6185
JH
3402 /* The conn params list only contains identity addresses */
3403 if (!hci_is_identity_address(addr, addr_type))
3404 return NULL;
3405
15819a70
AG
3406 list_for_each_entry(params, &hdev->le_conn_params, list) {
3407 if (bacmp(&params->addr, addr) == 0 &&
3408 params->addr_type == addr_type) {
3409 return params;
3410 }
3411 }
3412
3413 return NULL;
3414}
3415
cef952ce
AG
3416static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3417{
3418 struct hci_conn *conn;
3419
3420 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3421 if (!conn)
3422 return false;
3423
3424 if (conn->dst_type != type)
3425 return false;
3426
3427 if (conn->state != BT_CONNECTED)
3428 return false;
3429
3430 return true;
3431}
3432
4b10966f 3433/* This function requires the caller holds hdev->lock */
912b42ef
JH
3434struct hci_conn_params *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3435 bdaddr_t *addr, u8 addr_type)
4b10966f 3436{
912b42ef 3437 struct hci_conn_params *param;
4b10966f 3438
738f6185
JH
3439 /* The list only contains identity addresses */
3440 if (!hci_is_identity_address(addr, addr_type))
3441 return NULL;
3442
93450c75 3443 list_for_each_entry(param, &hdev->pend_le_conns, action) {
912b42ef
JH
3444 if (bacmp(&param->addr, addr) == 0 &&
3445 param->addr_type == addr_type)
3446 return param;
4b10966f
MH
3447 }
3448
3449 return NULL;
3450}
3451
3452/* This function requires the caller holds hdev->lock */
912b42ef 3453void hci_pend_le_conn_add(struct hci_dev *hdev, struct hci_conn_params *params)
4b10966f 3454{
93450c75
JH
3455 list_del_init(&params->action);
3456 list_add(&params->action, &hdev->pend_le_conns);
4b10966f 3457
912b42ef 3458 BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);
4b10966f 3459
4b10966f
MH
3460 hci_update_background_scan(hdev);
3461}
3462
3463/* This function requires the caller holds hdev->lock */
912b42ef 3464void hci_pend_le_conn_del(struct hci_dev *hdev, struct hci_conn_params *params)
4b10966f 3465{
93450c75 3466 list_del_init(&params->action);
4b10966f 3467
912b42ef 3468 BT_DBG("addr %pMR (type %u)", &params->addr, params->addr_type);
4b10966f 3469
4b10966f
MH
3470 hci_update_background_scan(hdev);
3471}
3472
3473/* This function requires the caller holds hdev->lock */
3474void hci_pend_le_conns_clear(struct hci_dev *hdev)
3475{
912b42ef
JH
3476 while (!list_empty(&hdev->pend_le_conns))
3477 list_del_init(hdev->pend_le_conns.next);
4b10966f
MH
3478
3479 BT_DBG("All LE pending connections cleared");
3480}
3481
15819a70 3482/* This function requires the caller holds hdev->lock */
51d167c0
MH
3483struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3484 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3485{
3486 struct hci_conn_params *params;
3487
c46245b3 3488 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3489 return NULL;
bf5b3c8b
MH
3490
3491 params = hci_conn_params_lookup(hdev, addr, addr_type);
3492 if (params)
51d167c0 3493 return params;
bf5b3c8b
MH
3494
3495 params = kzalloc(sizeof(*params), GFP_KERNEL);
3496 if (!params) {
3497 BT_ERR("Out of memory");
51d167c0 3498 return NULL;
bf5b3c8b
MH
3499 }
3500
3501 bacpy(&params->addr, addr);
3502 params->addr_type = addr_type;
3503
3504 list_add(&params->list, &hdev->le_conn_params);
93450c75 3505 INIT_LIST_HEAD(&params->action);
bf5b3c8b
MH
3506
3507 params->conn_min_interval = hdev->le_conn_min_interval;
3508 params->conn_max_interval = hdev->le_conn_max_interval;
3509 params->conn_latency = hdev->le_conn_latency;
3510 params->supervision_timeout = hdev->le_supv_timeout;
3511 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3512
3513 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3514
51d167c0 3515 return params;
bf5b3c8b
MH
3516}
3517
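/* Editor's illustration (in-kernel style, hypothetical helper): the
 * "caller holds hdev->lock" convention for the connection parameter
 * helpers above. example_add_params() exists only to show the locking.
 */
static int example_add_params(struct hci_dev *hdev, bdaddr_t *addr,
			      u8 addr_type)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);	/* serializes access to le_conn_params */
	params = hci_conn_params_add(hdev, addr, addr_type);
	hci_dev_unlock(hdev);

	return params ? 0 : -EIO;
}
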
3518 /* This function requires the caller holds hdev->lock */
3519int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3520 u8 auto_connect)
15819a70
AG
3521{
3522 struct hci_conn_params *params;
3523
8c87aae1
MH
3524 params = hci_conn_params_add(hdev, addr, addr_type);
3525 if (!params)
3526 return -EIO;
cef952ce 3527
42ce26de
JH
3528 if (params->auto_connect == auto_connect)
3529 return 0;
3530
851efca8
JH
3531 if (params->auto_connect == HCI_AUTO_CONN_REPORT &&
3532 auto_connect != HCI_AUTO_CONN_REPORT)
66f8455a 3533 list_del_init(&params->action);
15819a70 3534
cef952ce
AG
3535 switch (auto_connect) {
3536 case HCI_AUTO_CONN_DISABLED:
3537 case HCI_AUTO_CONN_LINK_LOSS:
912b42ef 3538 hci_pend_le_conn_del(hdev, params);
cef952ce 3539 break;
851efca8 3540 case HCI_AUTO_CONN_REPORT:
66f8455a
JH
3541 if (params->auto_connect != HCI_AUTO_CONN_REPORT) {
3542 list_del_init(&params->action);
3543 list_add(&params->action,
3544 &hdev->pend_le_reports);
3545 }
912b42ef 3546 hci_pend_le_conn_del(hdev, params);
851efca8 3547 break;
cef952ce
AG
3548 case HCI_AUTO_CONN_ALWAYS:
3549 if (!is_connected(hdev, addr, addr_type))
912b42ef 3550 hci_pend_le_conn_add(hdev, params);
cef952ce
AG
3551 break;
3552 }
15819a70 3553
851efca8
JH
3554 params->auto_connect = auto_connect;
3555
d06b50ce
MH
3556 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3557 auto_connect);
a9b0a04c
AG
3558
3559 return 0;
15819a70
AG
3560}
3561
3562/* This function requires the caller holds hdev->lock */
3563void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3564{
3565 struct hci_conn_params *params;
3566
3567 params = hci_conn_params_lookup(hdev, addr, addr_type);
3568 if (!params)
3569 return;
3570
851efca8 3571 if (params->auto_connect == HCI_AUTO_CONN_REPORT)
66f8455a 3572 list_del_init(&params->action);
851efca8 3573
912b42ef 3574 hci_pend_le_conn_del(hdev, params);
cef952ce 3575
15819a70
AG
3576 list_del(&params->list);
3577 kfree(params);
3578
3579 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3580}
3581
55af49a8
JH
3582/* This function requires the caller holds hdev->lock */
3583void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3584{
3585 struct hci_conn_params *params, *tmp;
3586
3587 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3588 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3589 continue;
3590 list_del(&params->list);
3591 kfree(params);
3592 }
3593
3594 BT_DBG("All LE disabled connection parameters were removed");
3595}
3596
3597/* This function requires the caller holds hdev->lock */
3598void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3599{
3600 struct hci_conn_params *params, *tmp;
3601
3602 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3603 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3604 continue;
a2f41a8f 3605 list_del(&params->action);
55af49a8
JH
3606 list_del(&params->list);
3607 kfree(params);
3608 }
3609
a2f41a8f 3610 hci_update_background_scan(hdev);
55af49a8
JH
3611
3612 BT_DBG("All enabled LE connection parameters were removed");
3613}
3614
15819a70 3615/* This function requires the caller holds hdev->lock */
373110c5 3616void hci_conn_params_clear_all(struct hci_dev *hdev)
15819a70
AG
3617{
3618 struct hci_conn_params *params, *tmp;
3619
3620 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3621 list_del(&params->action);
15819a70
AG
3622 list_del(&params->list);
3623 kfree(params);
3624 }
3625
a2f41a8f 3626 hci_update_background_scan(hdev);
1089b67d 3627
15819a70
AG
3628 BT_DBG("All LE connection parameters were removed");
3629}
3630
4c87eaab 3631static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3632{
4c87eaab
AG
3633 if (status) {
3634 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3635
4c87eaab
AG
3636 hci_dev_lock(hdev);
3637 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3638 hci_dev_unlock(hdev);
3639 return;
3640 }
7ba8b4be
AG
3641}
3642
4c87eaab 3643static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3644{
4c87eaab
AG
3645 /* General inquiry access code (GIAC) */
3646 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3647 struct hci_request req;
3648 struct hci_cp_inquiry cp;
7ba8b4be
AG
3649 int err;
3650
4c87eaab
AG
3651 if (status) {
3652 BT_ERR("Failed to disable LE scanning: status %d", status);
3653 return;
3654 }
7ba8b4be 3655
4c87eaab
AG
3656 switch (hdev->discovery.type) {
3657 case DISCOV_TYPE_LE:
3658 hci_dev_lock(hdev);
3659 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3660 hci_dev_unlock(hdev);
3661 break;
7ba8b4be 3662
4c87eaab
AG
3663 case DISCOV_TYPE_INTERLEAVED:
3664 hci_req_init(&req, hdev);
7ba8b4be 3665
4c87eaab
AG
3666 memset(&cp, 0, sizeof(cp));
3667 memcpy(&cp.lap, lap, sizeof(cp.lap));
3668 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3669 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3670
4c87eaab 3671 hci_dev_lock(hdev);
7dbfac1d 3672
4c87eaab 3673 hci_inquiry_cache_flush(hdev);
7dbfac1d 3674
4c87eaab
AG
3675 err = hci_req_run(&req, inquiry_complete);
3676 if (err) {
3677 BT_ERR("Inquiry request failed: err %d", err);
3678 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3679 }
7dbfac1d 3680
4c87eaab
AG
3681 hci_dev_unlock(hdev);
3682 break;
7dbfac1d 3683 }
7dbfac1d
AG
3684}
3685
7ba8b4be
AG
3686static void le_scan_disable_work(struct work_struct *work)
3687{
3688 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3689 le_scan_disable.work);
4c87eaab
AG
3690 struct hci_request req;
3691 int err;
7ba8b4be
AG
3692
3693 BT_DBG("%s", hdev->name);
3694
4c87eaab 3695 hci_req_init(&req, hdev);
28b75a89 3696
b1efcc28 3697 hci_req_add_le_scan_disable(&req);
28b75a89 3698
4c87eaab
AG
3699 err = hci_req_run(&req, le_scan_disable_work_complete);
3700 if (err)
3701 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3702}
3703
8d97250e
JH
3704static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3705{
3706 struct hci_dev *hdev = req->hdev;
3707
3708 /* If we're advertising or initiating an LE connection we can't
3709 * go ahead and change the random address at this time. This is
3710 * because the eventual initiator address used for the
3711 * subsequently created connection will be undefined (some
3712 * controllers use the new address and others the one we had
3713 * when the operation started).
3714 *
3715 * In this kind of scenario skip the update and let the random
3716 * address be updated at the next cycle.
3717 */
3718 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3719 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3720 BT_DBG("Deferring random address update");
3721 return;
3722 }
3723
3724 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3725}
3726
94b1fc92
MH
3727int hci_update_random_address(struct hci_request *req, bool require_privacy,
3728 u8 *own_addr_type)
ebd3a747
JH
3729{
3730 struct hci_dev *hdev = req->hdev;
3731 int err;
3732
3733 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
3734 * the current RPA has expired or something other than the
3735 * current RPA is in use, then generate a new one.
ebd3a747
JH
3736 */
3737 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3738 int to;
3739
3740 *own_addr_type = ADDR_LE_DEV_RANDOM;
3741
3742 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3743 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3744 return 0;
3745
2b5224dc 3746 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3747 if (err < 0) {
3748 BT_ERR("%s failed to generate new RPA", hdev->name);
3749 return err;
3750 }
3751
8d97250e 3752 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3753
3754 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3755 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3756
3757 return 0;
94b1fc92
MH
3758 }
3759
3760 /* In case of required privacy without resolvable private address,
3761 * use an unresolvable private address. This is useful for active
3762 * scanning and non-connectable advertising.
3763 */
3764 if (require_privacy) {
3765 bdaddr_t urpa;
3766
3767 get_random_bytes(&urpa, 6);
3768 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3769
3770 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3771 set_random_addr(req, &urpa);
94b1fc92 3772 return 0;
ebd3a747
JH
3773 }
3774
3775 /* If forcing the static address is in use or there is no public
3776 * address, use the static address as the random address (but skip
3777 * the HCI command if the current random address is already the
3778 * static one).
3779 */
111902f7 3780 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3781 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3782 *own_addr_type = ADDR_LE_DEV_RANDOM;
3783 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3784 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3785 &hdev->static_addr);
3786 return 0;
3787 }
3788
3789 /* Neither privacy nor static address is being used so use a
3790 * public address.
3791 */
3792 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3793
3794 return 0;
3795}
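/* Decision summary for hci_update_random_address() (restates the code
 * above, noted here for reference):
 *
 *   HCI_PRIVACY set                  -> ADDR_LE_DEV_RANDOM, fresh or cached RPA
 *   require_privacy, no HCI_PRIVACY  -> ADDR_LE_DEV_RANDOM, unresolvable NRPA
 *   forced static or no public addr  -> ADDR_LE_DEV_RANDOM, static address
 *   otherwise                        -> ADDR_LE_DEV_PUBLIC, no command queued
 */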
3796
a1f4c318
JH
3797/* Copy the Identity Address of the controller.
3798 *
3799 * If the controller has a public BD_ADDR, then by default use that one.
3800 * If this is an LE-only controller without a public address, default to
3801 * the static random address.
3802 *
3803 * For debugging purposes it is possible to force controllers with a
3804 * public address to use the static random address instead.
3805 */
3806void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3807 u8 *bdaddr_type)
3808{
111902f7 3809 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3810 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3811 bacpy(bdaddr, &hdev->static_addr);
3812 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3813 } else {
3814 bacpy(bdaddr, &hdev->bdaddr);
3815 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3816 }
3817}
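/* Hedged usage sketch (log_identity_address() is hypothetical): consume the
 * copied identity address; bdaddr_type follows the ADDR_LE_DEV_* values
 * assigned above.
 */
static void log_identity_address(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr, bdaddr_type);
}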
3818
9be0dab7
DH
3819/* Alloc HCI device */
3820struct hci_dev *hci_alloc_dev(void)
3821{
3822 struct hci_dev *hdev;
3823
3824 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3825 if (!hdev)
3826 return NULL;
3827
b1b813d4
DH
3828 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3829 hdev->esco_type = (ESCO_HV1);
3830 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3831 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3832 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3833 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3834 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3835 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3836
b1b813d4
DH
3837 hdev->sniff_max_interval = 800;
3838 hdev->sniff_min_interval = 80;
3839
3f959d46 3840 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3841 hdev->le_scan_interval = 0x0060;
3842 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3843 hdev->le_conn_min_interval = 0x0028;
3844 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3845 hdev->le_conn_latency = 0x0000;
3846 hdev->le_supv_timeout = 0x002a;
bef64738 3847
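	/* Units for the LE defaults above (Bluetooth spec conversions, noted
	 * here for reference): scan interval/window are in 0.625 ms steps
	 * (0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals in 1.25 ms
	 * steps (0x0028 = 50 ms, 0x0038 = 70 ms) and the supervision timeout
	 * in 10 ms steps (0x002a = 420 ms).
	 */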
d6bfd59c 3848 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3849 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3850 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3851 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3852
b1b813d4
DH
3853 mutex_init(&hdev->lock);
3854 mutex_init(&hdev->req_lock);
3855
3856 INIT_LIST_HEAD(&hdev->mgmt_pending);
3857 INIT_LIST_HEAD(&hdev->blacklist);
3858 INIT_LIST_HEAD(&hdev->uuids);
3859 INIT_LIST_HEAD(&hdev->link_keys);
3860 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3861 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3862 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3863 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3864 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3865 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3866 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3867 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3868
3869 INIT_WORK(&hdev->rx_work, hci_rx_work);
3870 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3871 INIT_WORK(&hdev->tx_work, hci_tx_work);
3872 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3873
b1b813d4
DH
3874 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3875 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3876 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3877
b1b813d4
DH
3878 skb_queue_head_init(&hdev->rx_q);
3879 skb_queue_head_init(&hdev->cmd_q);
3880 skb_queue_head_init(&hdev->raw_q);
3881
3882 init_waitqueue_head(&hdev->req_wait_q);
3883
65cc2b49 3884 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3885
b1b813d4
DH
3886 hci_init_sysfs(hdev);
3887 discovery_init(hdev);
9be0dab7
DH
3888
3889 return hdev;
3890}
3891EXPORT_SYMBOL(hci_alloc_dev);
3892
3893/* Free HCI device */
3894void hci_free_dev(struct hci_dev *hdev)
3895{
9be0dab7
DH
3896 /* will be freed via the device release callback */
3897 put_device(&hdev->dev);
3898}
3899EXPORT_SYMBOL(hci_free_dev);
3900
1da177e4
LT
3901/* Register HCI device */
3902int hci_register_dev(struct hci_dev *hdev)
3903{
b1b813d4 3904 int id, error;
1da177e4 3905
010666a1 3906 if (!hdev->open || !hdev->close)
1da177e4
LT
3907 return -EINVAL;
3908
08add513
MM
3909 /* Do not allow HCI_AMP devices to register at index 0,
3910 * so the index can be used as the AMP controller ID.
3911 */
3df92b31
SL
3912 switch (hdev->dev_type) {
3913 case HCI_BREDR:
3914 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3915 break;
3916 case HCI_AMP:
3917 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3918 break;
3919 default:
3920 return -EINVAL;
1da177e4 3921 }
8e87d142 3922
3df92b31
SL
3923 if (id < 0)
3924 return id;
3925
1da177e4
LT
3926 sprintf(hdev->name, "hci%d", id);
3927 hdev->id = id;
2d8b3a11
AE
3928
3929 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3930
d8537548
KC
3931 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3932 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3933 if (!hdev->workqueue) {
3934 error = -ENOMEM;
3935 goto err;
3936 }
f48fd9c8 3937
d8537548
KC
3938 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3939 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3940 if (!hdev->req_workqueue) {
3941 destroy_workqueue(hdev->workqueue);
3942 error = -ENOMEM;
3943 goto err;
3944 }
3945
0153e2ec
MH
3946 if (!IS_ERR_OR_NULL(bt_debugfs))
3947 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3948
bdc3e0f1
MH
3949 dev_set_name(&hdev->dev, "%s", hdev->name);
3950
99780a7b
JH
3951 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3952 CRYPTO_ALG_ASYNC);
3953 if (IS_ERR(hdev->tfm_aes)) {
3954 BT_ERR("Unable to create crypto context");
3955 error = PTR_ERR(hdev->tfm_aes);
3956 hdev->tfm_aes = NULL;
3957 goto err_wqueue;
3958 }
3959
bdc3e0f1 3960 error = device_add(&hdev->dev);
33ca954d 3961 if (error < 0)
99780a7b 3962 goto err_tfm;
1da177e4 3963
611b30f7 3964 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3965 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3966 hdev);
611b30f7
MH
3967 if (hdev->rfkill) {
3968 if (rfkill_register(hdev->rfkill) < 0) {
3969 rfkill_destroy(hdev->rfkill);
3970 hdev->rfkill = NULL;
3971 }
3972 }
3973
5e130367
JH
3974 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3975 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3976
a8b2d5c2 3977 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3978 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3979
01cd3404 3980 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3981 /* Assume BR/EDR support until proven otherwise (such as
3982 * through reading supported features during init.
3983 */
3984 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3985 }
ce2be9ac 3986
fcee3377
GP
3987 write_lock(&hci_dev_list_lock);
3988 list_add(&hdev->list, &hci_dev_list);
3989 write_unlock(&hci_dev_list_lock);
3990
4a964404
MH
3991 /* Devices that are marked for raw-only usage are unconfigured
3992 * and should not be included in normal operation.
fee746b0
MH
3993 */
3994 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 3995 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 3996
1da177e4 3997 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3998 hci_dev_hold(hdev);
1da177e4 3999
19202573 4000 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4001
1da177e4 4002 return id;
f48fd9c8 4003
99780a7b
JH
4004err_tfm:
4005 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4006err_wqueue:
4007 destroy_workqueue(hdev->workqueue);
6ead1bbc 4008 destroy_workqueue(hdev->req_workqueue);
33ca954d 4009err:
3df92b31 4010 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4011
33ca954d 4012 return error;
1da177e4
LT
4013}
4014EXPORT_SYMBOL(hci_register_dev);
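/* Minimal driver-side sketch of the alloc/register sequence above. The my_*
 * callbacks are hypothetical placeholders for a transport driver's
 * implementation, not code from any real driver.
 */
static int my_open(struct hci_dev *hdev);
static int my_close(struct hci_dev *hdev);
static int my_send(struct hci_dev *hdev, struct sk_buff *skb);

static int my_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* or HCI_UART etc., matching the transport */
	hdev->open = my_open;	/* open/close are mandatory, see above */
	hdev->close = my_close;
	hdev->send = my_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}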
4015
4016/* Unregister HCI device */
59735631 4017void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4018{
3df92b31 4019 int i, id;
ef222013 4020
c13854ce 4021 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4022
94324962
JH
4023 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4024
3df92b31
SL
4025 id = hdev->id;
4026
f20d09d5 4027 write_lock(&hci_dev_list_lock);
1da177e4 4028 list_del(&hdev->list);
f20d09d5 4029 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4030
4031 hci_dev_do_close(hdev);
4032
cd4c5391 4033 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4034 kfree_skb(hdev->reassembly[i]);
4035
b9b5ef18
GP
4036 cancel_work_sync(&hdev->power_on);
4037
ab81cbf9 4038 if (!test_bit(HCI_INIT, &hdev->flags) &&
0602a8ad 4039 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 4040 hci_dev_lock(hdev);
744cf19e 4041 mgmt_index_removed(hdev);
09fd0de5 4042 hci_dev_unlock(hdev);
56e5cb86 4043 }
ab81cbf9 4044
2e58ef3e
JH
4045 /* mgmt_index_removed should take care of emptying the
4046 * pending list */
4047 BUG_ON(!list_empty(&hdev->mgmt_pending));
4048
1da177e4
LT
4049 hci_notify(hdev, HCI_DEV_UNREG);
4050
611b30f7
MH
4051 if (hdev->rfkill) {
4052 rfkill_unregister(hdev->rfkill);
4053 rfkill_destroy(hdev->rfkill);
4054 }
4055
99780a7b
JH
4056 if (hdev->tfm_aes)
4057 crypto_free_blkcipher(hdev->tfm_aes);
4058
bdc3e0f1 4059 device_del(&hdev->dev);
147e2d59 4060
0153e2ec
MH
4061 debugfs_remove_recursive(hdev->debugfs);
4062
f48fd9c8 4063 destroy_workqueue(hdev->workqueue);
6ead1bbc 4064 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4065
09fd0de5 4066 hci_dev_lock(hdev);
e2e0cacb 4067 hci_blacklist_clear(hdev);
2aeb9a1a 4068 hci_uuids_clear(hdev);
55ed8ca1 4069 hci_link_keys_clear(hdev);
b899efaf 4070 hci_smp_ltks_clear(hdev);
970c4e46 4071 hci_smp_irks_clear(hdev);
2763eda6 4072 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4073 hci_white_list_clear(hdev);
373110c5 4074 hci_conn_params_clear_all(hdev);
09fd0de5 4075 hci_dev_unlock(hdev);
e2e0cacb 4076
dc946bd8 4077 hci_dev_put(hdev);
3df92b31
SL
4078
4079 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4080}
4081EXPORT_SYMBOL(hci_unregister_dev);
4082
4083/* Suspend HCI device */
4084int hci_suspend_dev(struct hci_dev *hdev)
4085{
4086 hci_notify(hdev, HCI_DEV_SUSPEND);
4087 return 0;
4088}
4089EXPORT_SYMBOL(hci_suspend_dev);
4090
4091/* Resume HCI device */
4092int hci_resume_dev(struct hci_dev *hdev)
4093{
4094 hci_notify(hdev, HCI_DEV_RESUME);
4095 return 0;
4096}
4097EXPORT_SYMBOL(hci_resume_dev);
4098
76bca880 4099/* Receive frame from HCI drivers */
e1a26170 4100int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4101{
76bca880 4102 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4103 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4104 kfree_skb(skb);
4105 return -ENXIO;
4106 }
4107
d82603c6 4108 /* Incoming skb */
76bca880
MH
4109 bt_cb(skb)->incoming = 1;
4110
4111 /* Time stamp */
4112 __net_timestamp(skb);
4113
76bca880 4114 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4115 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4116
76bca880
MH
4117 return 0;
4118}
4119EXPORT_SYMBOL(hci_recv_frame);
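/* Driver-side sketch (my_deliver_event() is hypothetical): hand a complete
 * event packet from the transport to the core. The driver allocates the skb
 * and must set pkt_type; note that hci_recv_frame() consumes the skb even on
 * error.
 */
static int my_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}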
4120
33e882a5 4121static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4122 int count, __u8 index)
33e882a5
SS
4123{
4124 int len = 0;
4125 int hlen = 0;
4126 int remain = count;
4127 struct sk_buff *skb;
4128 struct bt_skb_cb *scb;
4129
4130 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4131 index >= NUM_REASSEMBLY)
33e882a5
SS
4132 return -EILSEQ;
4133
4134 skb = hdev->reassembly[index];
4135
4136 if (!skb) {
4137 switch (type) {
4138 case HCI_ACLDATA_PKT:
4139 len = HCI_MAX_FRAME_SIZE;
4140 hlen = HCI_ACL_HDR_SIZE;
4141 break;
4142 case HCI_EVENT_PKT:
4143 len = HCI_MAX_EVENT_SIZE;
4144 hlen = HCI_EVENT_HDR_SIZE;
4145 break;
4146 case HCI_SCODATA_PKT:
4147 len = HCI_MAX_SCO_SIZE;
4148 hlen = HCI_SCO_HDR_SIZE;
4149 break;
4150 }
4151
1e429f38 4152 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4153 if (!skb)
4154 return -ENOMEM;
4155
4156 scb = (void *) skb->cb;
4157 scb->expect = hlen;
4158 scb->pkt_type = type;
4159
33e882a5
SS
4160 hdev->reassembly[index] = skb;
4161 }
4162
4163 while (count) {
4164 scb = (void *) skb->cb;
89bb46d0 4165 len = min_t(uint, scb->expect, count);
33e882a5
SS
4166
4167 memcpy(skb_put(skb, len), data, len);
4168
4169 count -= len;
4170 data += len;
4171 scb->expect -= len;
4172 remain = count;
4173
4174 switch (type) {
4175 case HCI_EVENT_PKT:
4176 if (skb->len == HCI_EVENT_HDR_SIZE) {
4177 struct hci_event_hdr *h = hci_event_hdr(skb);
4178 scb->expect = h->plen;
4179
4180 if (skb_tailroom(skb) < scb->expect) {
4181 kfree_skb(skb);
4182 hdev->reassembly[index] = NULL;
4183 return -ENOMEM;
4184 }
4185 }
4186 break;
4187
4188 case HCI_ACLDATA_PKT:
4189 if (skb->len == HCI_ACL_HDR_SIZE) {
4190 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4191 scb->expect = __le16_to_cpu(h->dlen);
4192
4193 if (skb_tailroom(skb) < scb->expect) {
4194 kfree_skb(skb);
4195 hdev->reassembly[index] = NULL;
4196 return -ENOMEM;
4197 }
4198 }
4199 break;
4200
4201 case HCI_SCODATA_PKT:
4202 if (skb->len == HCI_SCO_HDR_SIZE) {
4203 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4204 scb->expect = h->dlen;
4205
4206 if (skb_tailroom(skb) < scb->expect) {
4207 kfree_skb(skb);
4208 hdev->reassembly[index] = NULL;
4209 return -ENOMEM;
4210 }
4211 }
4212 break;
4213 }
4214
4215 if (scb->expect == 0) {
4216 /* Complete frame */
4217
4218 bt_cb(skb)->pkt_type = type;
e1a26170 4219 hci_recv_frame(hdev, skb);
33e882a5
SS
4220
4221 hdev->reassembly[index] = NULL;
4222 return remain;
4223 }
4224 }
4225
4226 return remain;
4227}
4228
ef222013
MH
4229int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4230{
f39a3c06
SS
4231 int rem = 0;
4232
ef222013
MH
4233 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4234 return -EILSEQ;
4235
da5f6c37 4236 while (count) {
1e429f38 4237 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4238 if (rem < 0)
4239 return rem;
ef222013 4240
f39a3c06
SS
4241 data += (count - rem);
4242 count = rem;
f81c6224 4243 }
ef222013 4244
f39a3c06 4245 return rem;
ef222013
MH
4246}
4247EXPORT_SYMBOL(hci_recv_fragment);
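/* Illustration (hypothetical byte values): hci_recv_fragment() accepts a
 * typed packet in arbitrary chunks, keeping reassembly state per packet type
 * in hdev->reassembly[]. Here a Command Complete event (plen 3) arrives in
 * two pieces; the second call completes the frame and passes it on via
 * hci_recv_frame().
 */
static void example_fragmented_event(struct hci_dev *hdev)
{
	u8 part1[] = { 0x0e, 0x03, 0x01 };	/* evt + plen + first param */
	u8 part2[] = { 0x23, 0x0c };		/* remaining two param bytes */

	hci_recv_fragment(hdev, HCI_EVENT_PKT, part1, sizeof(part1));
	hci_recv_fragment(hdev, HCI_EVENT_PKT, part2, sizeof(part2));
}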
4248
99811510
SS
4249#define STREAM_REASSEMBLY 0
4250
4251int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4252{
4253 int type;
4254 int rem = 0;
4255
da5f6c37 4256 while (count) {
99811510
SS
4257 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4258
4259 if (!skb) {
4260 struct { char type; } *pkt;
4261
4262 /* Start of the frame */
4263 pkt = data;
4264 type = pkt->type;
4265
4266 data++;
4267 count--;
4268 } else
4269 type = bt_cb(skb)->pkt_type;
4270
1e429f38 4271 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4272 STREAM_REASSEMBLY);
99811510
SS
4273 if (rem < 0)
4274 return rem;
4275
4276 data += (count - rem);
4277 count = rem;
f81c6224 4278 }
99811510
SS
4279
4280 return rem;
4281}
4282EXPORT_SYMBOL(hci_recv_stream_fragment);
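/* Sketch for H4-style transports (my_uart_rx() and the buffer contents are
 * hypothetical): the stream variant expects each frame to be prefixed with
 * its packet-type byte and uses a single reassembly slot, so raw serial data
 * such as { HCI_EVENT_PKT, 0x0e, 0x03, 0x01, 0x23, 0x0c } can be fed in
 * whatever chunk sizes the hardware delivers.
 */
static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	int err = hci_recv_stream_fragment(hdev, buf, len);

	if (err < 0)
		BT_ERR("%s stream reassembly failed: err %d", hdev->name, err);
}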
4283
1da177e4
LT
4284/* ---- Interface to upper protocols ---- */
4285
1da177e4
LT
4286int hci_register_cb(struct hci_cb *cb)
4287{
4288 BT_DBG("%p name %s", cb, cb->name);
4289
f20d09d5 4290 write_lock(&hci_cb_list_lock);
1da177e4 4291 list_add(&cb->list, &hci_cb_list);
f20d09d5 4292 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4293
4294 return 0;
4295}
4296EXPORT_SYMBOL(hci_register_cb);
4297
4298int hci_unregister_cb(struct hci_cb *cb)
4299{
4300 BT_DBG("%p name %s", cb, cb->name);
4301
f20d09d5 4302 write_lock(&hci_cb_list_lock);
1da177e4 4303 list_del(&cb->list);
f20d09d5 4304 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4305
4306 return 0;
4307}
4308EXPORT_SYMBOL(hci_unregister_cb);
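/* Sketch of an upper-protocol hook. Caveat: the hci_cb field set shown here
 * (security_cfm etc.) is an assumption about the struct layout at the time
 * of this code; my_security_cfm and "my_proto" are hypothetical.
 */
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status, encrypt);
}

static struct hci_cb my_cb = {
	.name		= "my_proto",
	.security_cfm	= my_security_cfm,
};

/* hci_register_cb(&my_cb) at module init, hci_unregister_cb(&my_cb) on exit */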
4309
51086991 4310static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4311{
0d48d939 4312 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4313
cd82e61c
MH
4314 /* Time stamp */
4315 __net_timestamp(skb);
1da177e4 4316
cd82e61c
MH
4317 /* Send copy to monitor */
4318 hci_send_to_monitor(hdev, skb);
4319
4320 if (atomic_read(&hdev->promisc)) {
4321 /* Send copy to the sockets */
470fe1b5 4322 hci_send_to_sock(hdev, skb);
1da177e4
LT
4323 }
4324
4325 /* Get rid of skb owner, prior to sending to the driver. */
4326 skb_orphan(skb);
4327
7bd8f09f 4328 if (hdev->send(hdev, skb) < 0)
51086991 4329 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4330}
4331
3119ae95
JH
4332void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4333{
4334 skb_queue_head_init(&req->cmd_q);
4335 req->hdev = hdev;
5d73e034 4336 req->err = 0;
3119ae95
JH
4337}
4338
4339int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4340{
4341 struct hci_dev *hdev = req->hdev;
4342 struct sk_buff *skb;
4343 unsigned long flags;
4344
4345 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4346
5d73e034
AG
4347 /* If an error occurred during request building, remove all HCI
4348 * commands queued on the HCI request queue.
4349 */
4350 if (req->err) {
4351 skb_queue_purge(&req->cmd_q);
4352 return req->err;
4353 }
4354
3119ae95
JH
4355 /* Do not allow empty requests */
4356 if (skb_queue_empty(&req->cmd_q))
382b0c39 4357 return -ENODATA;
3119ae95
JH
4358
4359 skb = skb_peek_tail(&req->cmd_q);
4360 bt_cb(skb)->req.complete = complete;
4361
4362 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4363 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4364 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4365
4366 queue_work(hdev->workqueue, &hdev->cmd_work);
4367
4368 return 0;
4369}
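/* Minimal sketch of the request API defined above (my_complete and
 * my_write_scan_enable are hypothetical; the opcode is a real one from
 * hci.h): queue a command and receive one completion callback for the whole
 * request.
 */
static void my_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int my_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, my_complete);
}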
4370
1ca3a9d0 4371static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4372 u32 plen, const void *param)
1da177e4
LT
4373{
4374 int len = HCI_COMMAND_HDR_SIZE + plen;
4375 struct hci_command_hdr *hdr;
4376 struct sk_buff *skb;
4377
1da177e4 4378 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4379 if (!skb)
4380 return NULL;
1da177e4
LT
4381
4382 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4383 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4384 hdr->plen = plen;
4385
4386 if (plen)
4387 memcpy(skb_put(skb, plen), param, plen);
4388
4389 BT_DBG("skb len %d", skb->len);
4390
0d48d939 4391 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4392
1ca3a9d0
JH
4393 return skb;
4394}
4395
4396/* Send HCI command */
07dc93dd
JH
4397int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4398 const void *param)
1ca3a9d0
JH
4399{
4400 struct sk_buff *skb;
4401
4402 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4403
4404 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4405 if (!skb) {
4406 BT_ERR("%s no memory for command", hdev->name);
4407 return -ENOMEM;
4408 }
4409
11714b3d
JH
4410 /* Stand-alone HCI commands must be flagged as
4411 * single-command requests.
4412 */
4413 bt_cb(skb)->req.start = true;
4414
1da177e4 4415 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4416 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4417
4418 return 0;
4419}
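/* Example of a stand-alone command (my_enable_auth() is hypothetical,
 * HCI_OP_WRITE_AUTH_ENABLE is a real opcode): no request object is needed,
 * since the core flags the skb as a single-command request itself.
 */
static int my_enable_auth(struct hci_dev *hdev)
{
	u8 enable = 0x01;

	return hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &enable);
}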
1da177e4 4420
71c76a17 4421/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4422void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4423 const void *param, u8 event)
71c76a17
JH
4424{
4425 struct hci_dev *hdev = req->hdev;
4426 struct sk_buff *skb;
4427
4428 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4429
34739c1e
AG
4430 /* If an error occurred during request building, there is no point in
4431 * queueing the HCI command. We can simply return.
4432 */
4433 if (req->err)
4434 return;
4435
71c76a17
JH
4436 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4437 if (!skb) {
5d73e034
AG
4438 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4439 hdev->name, opcode);
4440 req->err = -ENOMEM;
e348fe6b 4441 return;
71c76a17
JH
4442 }
4443
4444 if (skb_queue_empty(&req->cmd_q))
4445 bt_cb(skb)->req.start = true;
4446
02350a72
JH
4447 bt_cb(skb)->req.event = event;
4448
71c76a17 4449 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4450}
4451
07dc93dd
JH
4452void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4453 const void *param)
02350a72
JH
4454{
4455 hci_req_add_ev(req, opcode, plen, param, 0);
4456}
4457
1da177e4 4458/* Get data from the previously sent command */
a9de9248 4459void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4460{
4461 struct hci_command_hdr *hdr;
4462
4463 if (!hdev->sent_cmd)
4464 return NULL;
4465
4466 hdr = (void *) hdev->sent_cmd->data;
4467
a9de9248 4468 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4469 return NULL;
4470
f0e09510 4471 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4472
4473 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4474}
4475
4476/* Send ACL data */
4477static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4478{
4479 struct hci_acl_hdr *hdr;
4480 int len = skb->len;
4481
badff6d0
ACM
4482 skb_push(skb, HCI_ACL_HDR_SIZE);
4483 skb_reset_transport_header(skb);
9c70220b 4484 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4485 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4486 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4487}
4488
ee22be7e 4489static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4490 struct sk_buff *skb, __u16 flags)
1da177e4 4491{
ee22be7e 4492 struct hci_conn *conn = chan->conn;
1da177e4
LT
4493 struct hci_dev *hdev = conn->hdev;
4494 struct sk_buff *list;
4495
087bfd99
GP
4496 skb->len = skb_headlen(skb);
4497 skb->data_len = 0;
4498
4499 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4500
4501 switch (hdev->dev_type) {
4502 case HCI_BREDR:
4503 hci_add_acl_hdr(skb, conn->handle, flags);
4504 break;
4505 case HCI_AMP:
4506 hci_add_acl_hdr(skb, chan->handle, flags);
4507 break;
4508 default:
4509 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4510 return;
4511 }
087bfd99 4512
70f23020
AE
4513 list = skb_shinfo(skb)->frag_list;
4514 if (!list) {
1da177e4
LT
4515 /* Non fragmented */
4516 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4517
73d80deb 4518 skb_queue_tail(queue, skb);
1da177e4
LT
4519 } else {
4520 /* Fragmented */
4521 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4522
4523 skb_shinfo(skb)->frag_list = NULL;
4524
4525 /* Queue all fragments atomically */
af3e6359 4526 spin_lock(&queue->lock);
1da177e4 4527
73d80deb 4528 __skb_queue_tail(queue, skb);
e702112f
AE
4529
4530 flags &= ~ACL_START;
4531 flags |= ACL_CONT;
1da177e4
LT
4532 do {
4533 skb = list; list = list->next;
8e87d142 4534
0d48d939 4535 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4536 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4537
4538 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4539
73d80deb 4540 __skb_queue_tail(queue, skb);
1da177e4
LT
4541 } while (list);
4542
af3e6359 4543 spin_unlock(&queue->lock);
1da177e4 4544 }
73d80deb
LAD
4545}
4546
4547void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4548{
ee22be7e 4549 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4550
f0e09510 4551 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4552
ee22be7e 4553 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4554
3eff45ea 4555 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4556}
1da177e4
LT
4557
4558/* Send SCO data */
0d861d8b 4559void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4560{
4561 struct hci_dev *hdev = conn->hdev;
4562 struct hci_sco_hdr hdr;
4563
4564 BT_DBG("%s len %d", hdev->name, skb->len);
4565
aca3192c 4566 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4567 hdr.dlen = skb->len;
4568
badff6d0
ACM
4569 skb_push(skb, HCI_SCO_HDR_SIZE);
4570 skb_reset_transport_header(skb);
9c70220b 4571 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4572
0d48d939 4573 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4574
1da177e4 4575 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4576 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4577}
1da177e4
LT
4578
4579/* ---- HCI TX task (outgoing data) ---- */
4580
4581/* HCI Connection scheduler */
6039aa73
GP
4582static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4583 int *quote)
1da177e4
LT
4584{
4585 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4586 struct hci_conn *conn = NULL, *c;
abc5de8f 4587 unsigned int num = 0, min = ~0;
1da177e4 4588
8e87d142 4589 /* We don't have to lock the device here. Connections are always
1da177e4 4590 * added and removed with TX task disabled. */
bf4c6325
GP
4591
4592 rcu_read_lock();
4593
4594 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4595 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4596 continue;
769be974
MH
4597
4598 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4599 continue;
4600
1da177e4
LT
4601 num++;
4602
4603 if (c->sent < min) {
4604 min = c->sent;
4605 conn = c;
4606 }
52087a79
LAD
4607
4608 if (hci_conn_num(hdev, type) == num)
4609 break;
1da177e4
LT
4610 }
4611
bf4c6325
GP
4612 rcu_read_unlock();
4613
1da177e4 4614 if (conn) {
6ed58ec5
VT
4615 int cnt, q;
4616
4617 switch (conn->type) {
4618 case ACL_LINK:
4619 cnt = hdev->acl_cnt;
4620 break;
4621 case SCO_LINK:
4622 case ESCO_LINK:
4623 cnt = hdev->sco_cnt;
4624 break;
4625 case LE_LINK:
4626 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4627 break;
4628 default:
4629 cnt = 0;
4630 BT_ERR("Unknown link type");
4631 }
4632
4633 q = cnt / num;
1da177e4
LT
4634 *quote = q ? q : 1;
4635 } else
4636 *quote = 0;
4637
4638 BT_DBG("conn %p quote %d", conn, *quote);
4639 return conn;
4640}
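/* Worked example for the quote above (illustrative numbers): with
 * hdev->acl_cnt == 8 free ACL slots shared by num == 3 busy ACL connections,
 * q = 8 / 3 = 2, so the least-served connection may send two packets this
 * round; a zero quotient degrades to a quote of 1 to avoid stalls.
 */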
4641
6039aa73 4642static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4643{
4644 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4645 struct hci_conn *c;
1da177e4 4646
bae1f5d9 4647 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4648
bf4c6325
GP
4649 rcu_read_lock();
4650
1da177e4 4651 /* Kill stalled connections */
bf4c6325 4652 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4653 if (c->type == type && c->sent) {
6ed93dc6
AE
4654 BT_ERR("%s killing stalled connection %pMR",
4655 hdev->name, &c->dst);
bed71748 4656 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4657 }
4658 }
bf4c6325
GP
4659
4660 rcu_read_unlock();
1da177e4
LT
4661}
4662
6039aa73
GP
4663static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4664 int *quote)
1da177e4 4665{
73d80deb
LAD
4666 struct hci_conn_hash *h = &hdev->conn_hash;
4667 struct hci_chan *chan = NULL;
abc5de8f 4668 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4669 struct hci_conn *conn;
73d80deb
LAD
4670 int cnt, q, conn_num = 0;
4671
4672 BT_DBG("%s", hdev->name);
4673
bf4c6325
GP
4674 rcu_read_lock();
4675
4676 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4677 struct hci_chan *tmp;
4678
4679 if (conn->type != type)
4680 continue;
4681
4682 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4683 continue;
4684
4685 conn_num++;
4686
8192edef 4687 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4688 struct sk_buff *skb;
4689
4690 if (skb_queue_empty(&tmp->data_q))
4691 continue;
4692
4693 skb = skb_peek(&tmp->data_q);
4694 if (skb->priority < cur_prio)
4695 continue;
4696
4697 if (skb->priority > cur_prio) {
4698 num = 0;
4699 min = ~0;
4700 cur_prio = skb->priority;
4701 }
4702
4703 num++;
4704
4705 if (conn->sent < min) {
4706 min = conn->sent;
4707 chan = tmp;
4708 }
4709 }
4710
4711 if (hci_conn_num(hdev, type) == conn_num)
4712 break;
4713 }
4714
bf4c6325
GP
4715 rcu_read_unlock();
4716
73d80deb
LAD
4717 if (!chan)
4718 return NULL;
4719
4720 switch (chan->conn->type) {
4721 case ACL_LINK:
4722 cnt = hdev->acl_cnt;
4723 break;
bd1eb66b
AE
4724 case AMP_LINK:
4725 cnt = hdev->block_cnt;
4726 break;
73d80deb
LAD
4727 case SCO_LINK:
4728 case ESCO_LINK:
4729 cnt = hdev->sco_cnt;
4730 break;
4731 case LE_LINK:
4732 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4733 break;
4734 default:
4735 cnt = 0;
4736 BT_ERR("Unknown link type");
4737 }
4738
4739 q = cnt / num;
4740 *quote = q ? q : 1;
4741 BT_DBG("chan %p quote %d", chan, *quote);
4742 return chan;
4743}
4744
02b20f0b
LAD
4745static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4746{
4747 struct hci_conn_hash *h = &hdev->conn_hash;
4748 struct hci_conn *conn;
4749 int num = 0;
4750
4751 BT_DBG("%s", hdev->name);
4752
bf4c6325
GP
4753 rcu_read_lock();
4754
4755 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4756 struct hci_chan *chan;
4757
4758 if (conn->type != type)
4759 continue;
4760
4761 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4762 continue;
4763
4764 num++;
4765
8192edef 4766 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4767 struct sk_buff *skb;
4768
4769 if (chan->sent) {
4770 chan->sent = 0;
4771 continue;
4772 }
4773
4774 if (skb_queue_empty(&chan->data_q))
4775 continue;
4776
4777 skb = skb_peek(&chan->data_q);
4778 if (skb->priority >= HCI_PRIO_MAX - 1)
4779 continue;
4780
4781 skb->priority = HCI_PRIO_MAX - 1;
4782
4783 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4784 skb->priority);
02b20f0b
LAD
4785 }
4786
4787 if (hci_conn_num(hdev, type) == num)
4788 break;
4789 }
bf4c6325
GP
4790
4791 rcu_read_unlock();
4792
02b20f0b
LAD
4793}
4794
b71d385a
AE
4795static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4796{
4797 /* Calculate count of blocks used by this packet */
4798 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4799}
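/* Worked example (illustrative numbers): with hdev->block_len == 64, a
 * 150-byte ACL skb (header included) has 150 - HCI_ACL_HDR_SIZE = 146
 * payload bytes and occupies DIV_ROUND_UP(146, 64) = 3 controller blocks.
 */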
4800
6039aa73 4801static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4802{
4a964404 4803 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4804 /* ACL tx timeout must be longer than maximum
4805 * link supervision timeout (40.9 seconds) */
63d2bc1b 4806 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4807 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4808 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4809 }
63d2bc1b 4810}
1da177e4 4811
6039aa73 4812static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4813{
4814 unsigned int cnt = hdev->acl_cnt;
4815 struct hci_chan *chan;
4816 struct sk_buff *skb;
4817 int quote;
4818
4819 __check_timeout(hdev, cnt);
04837f64 4820
73d80deb 4821 while (hdev->acl_cnt &&
a8c5fb1a 4822 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4823 u32 priority = (skb_peek(&chan->data_q))->priority;
4824 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4825 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4826 skb->len, skb->priority);
73d80deb 4827
ec1cce24
LAD
4828 /* Stop if priority has changed */
4829 if (skb->priority < priority)
4830 break;
4831
4832 skb = skb_dequeue(&chan->data_q);
4833
73d80deb 4834 hci_conn_enter_active_mode(chan->conn,
04124681 4835 bt_cb(skb)->force_active);
04837f64 4836
57d17d70 4837 hci_send_frame(hdev, skb);
1da177e4
LT
4838 hdev->acl_last_tx = jiffies;
4839
4840 hdev->acl_cnt--;
73d80deb
LAD
4841 chan->sent++;
4842 chan->conn->sent++;
1da177e4
LT
4843 }
4844 }
02b20f0b
LAD
4845
4846 if (cnt != hdev->acl_cnt)
4847 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4848}
4849
6039aa73 4850static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4851{
63d2bc1b 4852 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4853 struct hci_chan *chan;
4854 struct sk_buff *skb;
4855 int quote;
bd1eb66b 4856 u8 type;
b71d385a 4857
63d2bc1b 4858 __check_timeout(hdev, cnt);
b71d385a 4859
bd1eb66b
AE
4860 BT_DBG("%s", hdev->name);
4861
4862 if (hdev->dev_type == HCI_AMP)
4863 type = AMP_LINK;
4864 else
4865 type = ACL_LINK;
4866
b71d385a 4867 while (hdev->block_cnt > 0 &&
bd1eb66b 4868 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4869 u32 priority = (skb_peek(&chan->data_q))->priority;
4870 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4871 int blocks;
4872
4873 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4874 skb->len, skb->priority);
b71d385a
AE
4875
4876 /* Stop if priority has changed */
4877 if (skb->priority < priority)
4878 break;
4879
4880 skb = skb_dequeue(&chan->data_q);
4881
4882 blocks = __get_blocks(hdev, skb);
4883 if (blocks > hdev->block_cnt)
4884 return;
4885
4886 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4887 bt_cb(skb)->force_active);
b71d385a 4888
57d17d70 4889 hci_send_frame(hdev, skb);
b71d385a
AE
4890 hdev->acl_last_tx = jiffies;
4891
4892 hdev->block_cnt -= blocks;
4893 quote -= blocks;
4894
4895 chan->sent += blocks;
4896 chan->conn->sent += blocks;
4897 }
4898 }
4899
4900 if (cnt != hdev->block_cnt)
bd1eb66b 4901 hci_prio_recalculate(hdev, type);
b71d385a
AE
4902}
4903
6039aa73 4904static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4905{
4906 BT_DBG("%s", hdev->name);
4907
bd1eb66b
AE
4908 /* No ACL link over BR/EDR controller */
4909 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4910 return;
4911
4912 /* No AMP link over AMP controller */
4913 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4914 return;
4915
4916 switch (hdev->flow_ctl_mode) {
4917 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4918 hci_sched_acl_pkt(hdev);
4919 break;
4920
4921 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4922 hci_sched_acl_blk(hdev);
4923 break;
4924 }
4925}
4926
1da177e4 4927/* Schedule SCO */
6039aa73 4928static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4929{
4930 struct hci_conn *conn;
4931 struct sk_buff *skb;
4932 int quote;
4933
4934 BT_DBG("%s", hdev->name);
4935
52087a79
LAD
4936 if (!hci_conn_num(hdev, SCO_LINK))
4937 return;
4938
1da177e4
LT
4939 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4940 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4941 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4942 hci_send_frame(hdev, skb);
1da177e4
LT
4943
4944 conn->sent++;
4945 if (conn->sent == ~0)
4946 conn->sent = 0;
4947 }
4948 }
4949}
4950
6039aa73 4951static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4952{
4953 struct hci_conn *conn;
4954 struct sk_buff *skb;
4955 int quote;
4956
4957 BT_DBG("%s", hdev->name);
4958
52087a79
LAD
4959 if (!hci_conn_num(hdev, ESCO_LINK))
4960 return;
4961
8fc9ced3
GP
4962 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4963 &quote))) {
b6a0dc82
MH
4964 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4965 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4966 hci_send_frame(hdev, skb);
b6a0dc82
MH
4967
4968 conn->sent++;
4969 if (conn->sent == ~0)
4970 conn->sent = 0;
4971 }
4972 }
4973}
4974
6039aa73 4975static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4976{
73d80deb 4977 struct hci_chan *chan;
6ed58ec5 4978 struct sk_buff *skb;
02b20f0b 4979 int quote, cnt, tmp;
6ed58ec5
VT
4980
4981 BT_DBG("%s", hdev->name);
4982
52087a79
LAD
4983 if (!hci_conn_num(hdev, LE_LINK))
4984 return;
4985
4a964404 4986 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
4987 /* LE tx timeout must be longer than maximum
4988 * link supervision timeout (40.9 seconds) */
bae1f5d9 4989 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4990 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4991 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4992 }
4993
4994 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4995 tmp = cnt;
73d80deb 4996 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4997 u32 priority = (skb_peek(&chan->data_q))->priority;
4998 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4999 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5000 skb->len, skb->priority);
6ed58ec5 5001
ec1cce24
LAD
5002 /* Stop if priority has changed */
5003 if (skb->priority < priority)
5004 break;
5005
5006 skb = skb_dequeue(&chan->data_q);
5007
57d17d70 5008 hci_send_frame(hdev, skb);
6ed58ec5
VT
5009 hdev->le_last_tx = jiffies;
5010
5011 cnt--;
73d80deb
LAD
5012 chan->sent++;
5013 chan->conn->sent++;
6ed58ec5
VT
5014 }
5015 }
73d80deb 5016
6ed58ec5
VT
5017 if (hdev->le_pkts)
5018 hdev->le_cnt = cnt;
5019 else
5020 hdev->acl_cnt = cnt;
02b20f0b
LAD
5021
5022 if (cnt != tmp)
5023 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5024}
5025
3eff45ea 5026static void hci_tx_work(struct work_struct *work)
1da177e4 5027{
3eff45ea 5028 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5029 struct sk_buff *skb;
5030
6ed58ec5 5031 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5032 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5033
52de599e
MH
5034 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5035 /* Schedule queues and send stuff to HCI driver */
5036 hci_sched_acl(hdev);
5037 hci_sched_sco(hdev);
5038 hci_sched_esco(hdev);
5039 hci_sched_le(hdev);
5040 }
6ed58ec5 5041
1da177e4
LT
5042 /* Send next queued raw (unknown type) packet */
5043 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5044 hci_send_frame(hdev, skb);
1da177e4
LT
5045}
5046
25985edc 5047/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5048
5049/* ACL data packet */
6039aa73 5050static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5051{
5052 struct hci_acl_hdr *hdr = (void *) skb->data;
5053 struct hci_conn *conn;
5054 __u16 handle, flags;
5055
5056 skb_pull(skb, HCI_ACL_HDR_SIZE);
5057
5058 handle = __le16_to_cpu(hdr->handle);
5059 flags = hci_flags(handle);
5060 handle = hci_handle(handle);
5061
f0e09510 5062 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5063 handle, flags);
1da177e4
LT
5064
5065 hdev->stat.acl_rx++;
5066
5067 hci_dev_lock(hdev);
5068 conn = hci_conn_hash_lookup_handle(hdev, handle);
5069 hci_dev_unlock(hdev);
8e87d142 5070
1da177e4 5071 if (conn) {
65983fc7 5072 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5073
1da177e4 5074 /* Send to upper protocol */
686ebf28
UF
5075 l2cap_recv_acldata(conn, skb, flags);
5076 return;
1da177e4 5077 } else {
8e87d142 5078 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5079 hdev->name, handle);
1da177e4
LT
5080 }
5081
5082 kfree_skb(skb);
5083}
5084
5085/* SCO data packet */
6039aa73 5086static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5087{
5088 struct hci_sco_hdr *hdr = (void *) skb->data;
5089 struct hci_conn *conn;
5090 __u16 handle;
5091
5092 skb_pull(skb, HCI_SCO_HDR_SIZE);
5093
5094 handle = __le16_to_cpu(hdr->handle);
5095
f0e09510 5096 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5097
5098 hdev->stat.sco_rx++;
5099
5100 hci_dev_lock(hdev);
5101 conn = hci_conn_hash_lookup_handle(hdev, handle);
5102 hci_dev_unlock(hdev);
5103
5104 if (conn) {
1da177e4 5105 /* Send to upper protocol */
686ebf28
UF
5106 sco_recv_scodata(conn, skb);
5107 return;
1da177e4 5108 } else {
8e87d142 5109 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5110 hdev->name, handle);
1da177e4
LT
5111 }
5112
5113 kfree_skb(skb);
5114}
5115
9238f36a
JH
5116static bool hci_req_is_complete(struct hci_dev *hdev)
5117{
5118 struct sk_buff *skb;
5119
5120 skb = skb_peek(&hdev->cmd_q);
5121 if (!skb)
5122 return true;
5123
5124 return bt_cb(skb)->req.start;
5125}
5126
42c6b129
JH
5127static void hci_resend_last(struct hci_dev *hdev)
5128{
5129 struct hci_command_hdr *sent;
5130 struct sk_buff *skb;
5131 u16 opcode;
5132
5133 if (!hdev->sent_cmd)
5134 return;
5135
5136 sent = (void *) hdev->sent_cmd->data;
5137 opcode = __le16_to_cpu(sent->opcode);
5138 if (opcode == HCI_OP_RESET)
5139 return;
5140
5141 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5142 if (!skb)
5143 return;
5144
5145 skb_queue_head(&hdev->cmd_q, skb);
5146 queue_work(hdev->workqueue, &hdev->cmd_work);
5147}
5148
9238f36a
JH
5149void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5150{
5151 hci_req_complete_t req_complete = NULL;
5152 struct sk_buff *skb;
5153 unsigned long flags;
5154
5155 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5156
42c6b129
JH
5157 /* If the completed command doesn't match the last one that was
5158 * sent, we need to do special handling of it.
9238f36a 5159 */
42c6b129
JH
5160 if (!hci_sent_cmd_data(hdev, opcode)) {
5161 /* Some CSR based controllers generate a spontaneous
5162 * reset complete event during init and any pending
5163 * command will never be completed. In such a case we
5164 * need to resend whatever was the last sent
5165 * command.
5166 */
5167 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5168 hci_resend_last(hdev);
5169
9238f36a 5170 return;
42c6b129 5171 }
9238f36a
JH
5172
5173 /* If the command succeeded and there's still more commands in
5174 * this request the request is not yet complete.
5175 */
5176 if (!status && !hci_req_is_complete(hdev))
5177 return;
5178
5179 /* If this was the last command in a request the complete
5180 * callback would be found in hdev->sent_cmd instead of the
5181 * command queue (hdev->cmd_q).
5182 */
5183 if (hdev->sent_cmd) {
5184 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5185
5186 if (req_complete) {
5187 /* We must set the complete callback to NULL to
5188 * avoid calling the callback more than once if
5189 * this function gets called again.
5190 */
5191 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5192
9238f36a 5193 goto call_complete;
53e21fbc 5194 }
9238f36a
JH
5195 }
5196
5197 /* Remove all pending commands belonging to this request */
5198 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5199 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5200 if (bt_cb(skb)->req.start) {
5201 __skb_queue_head(&hdev->cmd_q, skb);
5202 break;
5203 }
5204
5205 req_complete = bt_cb(skb)->req.complete;
5206 kfree_skb(skb);
5207 }
5208 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5209
5210call_complete:
5211 if (req_complete)
5212 req_complete(hdev, status);
5213}
5214
b78752cc 5215static void hci_rx_work(struct work_struct *work)
1da177e4 5216{
b78752cc 5217 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5218 struct sk_buff *skb;
5219
5220 BT_DBG("%s", hdev->name);
5221
1da177e4 5222 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5223 /* Send copy to monitor */
5224 hci_send_to_monitor(hdev, skb);
5225
1da177e4
LT
5226 if (atomic_read(&hdev->promisc)) {
5227 /* Send copy to the sockets */
470fe1b5 5228 hci_send_to_sock(hdev, skb);
1da177e4
LT
5229 }
5230
fee746b0 5231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5232 kfree_skb(skb);
5233 continue;
5234 }
5235
5236 if (test_bit(HCI_INIT, &hdev->flags)) {
5237 /* Don't process data packets in this state. */
0d48d939 5238 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5239 case HCI_ACLDATA_PKT:
5240 case HCI_SCODATA_PKT:
5241 kfree_skb(skb);
5242 continue;
3ff50b79 5243 }
1da177e4
LT
5244 }
5245
5246 /* Process frame */
0d48d939 5247 switch (bt_cb(skb)->pkt_type) {
1da177e4 5248 case HCI_EVENT_PKT:
b78752cc 5249 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5250 hci_event_packet(hdev, skb);
5251 break;
5252
5253 case HCI_ACLDATA_PKT:
5254 BT_DBG("%s ACL data packet", hdev->name);
5255 hci_acldata_packet(hdev, skb);
5256 break;
5257
5258 case HCI_SCODATA_PKT:
5259 BT_DBG("%s SCO data packet", hdev->name);
5260 hci_scodata_packet(hdev, skb);
5261 break;
5262
5263 default:
5264 kfree_skb(skb);
5265 break;
5266 }
5267 }
1da177e4
LT
5268}
5269
c347b765 5270static void hci_cmd_work(struct work_struct *work)
1da177e4 5271{
c347b765 5272 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5273 struct sk_buff *skb;
5274
2104786b
AE
5275 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5276 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5277
1da177e4 5278 /* Send queued commands */
5a08ecce
AE
5279 if (atomic_read(&hdev->cmd_cnt)) {
5280 skb = skb_dequeue(&hdev->cmd_q);
5281 if (!skb)
5282 return;
5283
7585b97a 5284 kfree_skb(hdev->sent_cmd);
1da177e4 5285
a675d7f1 5286 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5287 if (hdev->sent_cmd) {
1da177e4 5288 atomic_dec(&hdev->cmd_cnt);
57d17d70 5289 hci_send_frame(hdev, skb);
7bdb8a5c 5290 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5291 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5292 else
65cc2b49
MH
5293 schedule_delayed_work(&hdev->cmd_timer,
5294 HCI_CMD_TIMEOUT);
1da177e4
LT
5295 } else {
5296 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5297 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5298 }
5299 }
5300}
b1efcc28
AG
5301
5302void hci_req_add_le_scan_disable(struct hci_request *req)
5303{
5304 struct hci_cp_le_set_scan_enable cp;
5305
5306 memset(&cp, 0, sizeof(cp));
5307 cp.enable = LE_SCAN_DISABLE;
5308 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5309}
a4790dbd 5310
8ef30fd3
AG
5311void hci_req_add_le_passive_scan(struct hci_request *req)
5312{
5313 struct hci_cp_le_set_scan_param param_cp;
5314 struct hci_cp_le_set_scan_enable enable_cp;
5315 struct hci_dev *hdev = req->hdev;
5316 u8 own_addr_type;
5317
6ab535a7
MH
5318 /* Set require_privacy to false since no SCAN_REQ PDUs are sent
5319 * during passive scanning. Not using an unresolvable address
5320 * here is important so that peer devices using direct
5321 * advertising with our address will be correctly reported
5322 * by the controller.
8ef30fd3 5323 */
6ab535a7 5324 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5325 return;
5326
5327 memset(&param_cp, 0, sizeof(param_cp));
5328 param_cp.type = LE_SCAN_PASSIVE;
5329 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5330 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5331 param_cp.own_address_type = own_addr_type;
5332 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5333 &param_cp);
5334
5335 memset(&enable_cp, 0, sizeof(enable_cp));
5336 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5337 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5338 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5339 &enable_cp);
5340}
5341
a4790dbd
AG
5342static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5343{
5344 if (status)
5345 BT_DBG("HCI request failed to update background scanning: "
5346 "status 0x%2.2x", status);
5347}
5348
5349/* This function controls the background scanning based on hdev->pend_le_conns
5350 * list. If there are pending LE connections we start the background scanning,
5351 * otherwise we stop it.
5352 *
5353 * This function requires the caller holds hdev->lock.
5354 */
5355void hci_update_background_scan(struct hci_dev *hdev)
5356{
a4790dbd
AG
5357 struct hci_request req;
5358 struct hci_conn *conn;
5359 int err;
5360
c20c02d5
MH
5361 if (!test_bit(HCI_UP, &hdev->flags) ||
5362 test_bit(HCI_INIT, &hdev->flags) ||
5363 test_bit(HCI_SETUP, &hdev->dev_flags) ||
b8221770 5364 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5365 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5366 return;
5367
a4790dbd
AG
5368 hci_req_init(&req, hdev);
5369
66f8455a
JH
5370 if (list_empty(&hdev->pend_le_conns) &&
5371 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5372 /* If there are no pending LE connections or devices
5373 * to be scanned for, we should stop the background
5374 * scanning.
a4790dbd
AG
5375 */
5376
5377 /* If controller is not scanning we are done. */
5378 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5379 return;
5380
5381 hci_req_add_le_scan_disable(&req);
5382
5383 BT_DBG("%s stopping background scanning", hdev->name);
5384 } else {
a4790dbd
AG
5385 /* If there is at least one pending LE connection, we should
5386 * keep the background scan running.
5387 */
5388
a4790dbd
AG
5389 /* If controller is connecting, we should not start scanning
5390 * since some controllers are not able to scan and connect at
5391 * the same time.
5392 */
5393 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5394 if (conn)
5395 return;
5396
4340a124
AG
5397 /* If controller is currently scanning, we stop it to ensure we
5398 * don't miss any advertising (due to duplicates filter).
5399 */
5400 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5401 hci_req_add_le_scan_disable(&req);
5402
8ef30fd3 5403 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5404
5405 BT_DBG("%s starting background scanning", hdev->name);
5406 }
5407
5408 err = hci_req_run(&req, update_background_scan_complete);
5409 if (err)
5410 BT_ERR("Failed to run HCI request: err %d", err);
5411}