/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

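/* The dut_mode attribute exposes the controller's Device Under Test
 * mode. Reading returns 'Y' or 'N'; writing a boolean string toggles
 * the mode by sending HCI_OP_ENABLE_DUT_MODE or, to leave the mode
 * again, a plain HCI_OP_RESET.
 */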
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

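/* Per the Core specification, sniff intervals are expressed in
 * baseband slots of 0.625 ms and must be even; the setters below also
 * enforce that min never exceeds max.
 */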
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

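/* The identity entry dumps the identity address and its type, the
 * local Identity Resolving Key (IRK) and the current Resolvable
 * Private Address (RPA) of the controller.
 */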
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

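/* LE connection interval values are in units of 1.25 ms; the valid
 * range per the Core specification is 0x0006 (7.5 ms) to 0x0c80 (4 s),
 * and the minimum must never exceed the maximum.
 */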
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

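/* Slave latency is a count of connection events the slave may skip;
 * 0x01f3 (499 events) is the specification maximum.
 */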
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

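/* The link supervision timeout is in units of 10 ms; the valid range
 * per the Core specification is 0x000a (100 ms) to 0x0c80 (32 s).
 */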
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

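/* The advertising channel map is a bitmask of the three LE advertising
 * channels: bit 0 = channel 37, bit 1 = channel 38, bit 2 = channel 39.
 * At least one channel must be enabled, hence the 0x01-0x07 range.
 */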
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

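/* Pull the last received event out of hdev->recv_evt and hand it back
 * if it matches the expected event code (or, for command complete
 * events, the expected command opcode). On any mismatch the skb is
 * freed and an ERR_PTR is returned instead.
 */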
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

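/* Send a single HCI command and block (interruptibly, with timeout)
 * until the matching completion event arrives. The caller receives
 * the event skb and is responsible for freeing it.
 */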
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

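/* Pick the best inquiry mode the controller can handle: 0x02 (inquiry
 * with extended results) when EIR is available, 0x01 (inquiry with
 * RSSI) otherwise. A few controllers support RSSI inquiry without
 * advertising it in their feature mask, so they are detected by
 * manufacturer/revision and still get mode 0x01.
 */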
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

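/* Tell a BR/EDR capable controller whether the host supports LE (and
 * simultaneous LE and BR/EDR), but only send the command when the
 * setting would actually change.
 */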
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

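/* Run the staged controller initialization. Stage 1 applies to every
 * controller type; stages 2 through 4 only make sense for BR/EDR/LE
 * controllers, and the debugfs entries are created once during the
 * initial setup phase rather than on every power on.
 */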
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

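/* Discovery proceeds STOPPED -> STARTING -> FINDING (-> RESOLVING)
 * -> STOPPING -> STOPPED. Entering STOPPED also re-evaluates the
 * background scan via hci_update_background_scan(), and mgmt is only
 * told "discovering stopped" when a discovery actually ran.
 */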
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

1f9b9a5d 1890void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1891{
30883512 1892 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1893 struct inquiry_entry *p, *n;
1da177e4 1894
561aafbc
JH
1895 list_for_each_entry_safe(p, n, &cache->all, all) {
1896 list_del(&p->all);
b57c1a56 1897 kfree(p);
1da177e4 1898 }
561aafbc
JH
1899
1900 INIT_LIST_HEAD(&cache->unknown);
1901 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1902}
1903
a8c5fb1a
GP
1904struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1905 bdaddr_t *bdaddr)
1da177e4 1906{
30883512 1907 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1908 struct inquiry_entry *e;
1909
6ed93dc6 1910 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1911
561aafbc
JH
1912 list_for_each_entry(e, &cache->all, all) {
1913 if (!bacmp(&e->data.bdaddr, bdaddr))
1914 return e;
1915 }
1916
1917 return NULL;
1918}
1919
1920struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1921 bdaddr_t *bdaddr)
561aafbc 1922{
30883512 1923 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1924 struct inquiry_entry *e;
1925
6ed93dc6 1926 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1927
1928 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1929 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1930 return e;
1931 }
1932
1933 return NULL;
1da177e4
LT
1934}
1935
30dc78e1 1936struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1937 bdaddr_t *bdaddr,
1938 int state)
30dc78e1
JH
1939{
1940 struct discovery_state *cache = &hdev->discovery;
1941 struct inquiry_entry *e;
1942
6ed93dc6 1943 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1944
1945 list_for_each_entry(e, &cache->resolve, list) {
1946 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1947 return e;
1948 if (!bacmp(&e->data.bdaddr, bdaddr))
1949 return e;
1950 }
1951
1952 return NULL;
1953}
1954
a3d4e20a 1955void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1956 struct inquiry_entry *ie)
a3d4e20a
JH
1957{
1958 struct discovery_state *cache = &hdev->discovery;
1959 struct list_head *pos = &cache->resolve;
1960 struct inquiry_entry *p;
1961
1962 list_del(&ie->list);
1963
1964 list_for_each_entry(p, &cache->resolve, list) {
1965 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1966 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1967 break;
1968 pos = &p->list;
1969 }
1970
1971 list_add(&ie->list, pos);
1972}
1973
af58925c
MH
1974u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1975 bool name_known)
1da177e4 1976{
30883512 1977 struct discovery_state *cache = &hdev->discovery;
70f23020 1978 struct inquiry_entry *ie;
af58925c 1979 u32 flags = 0;
1da177e4 1980
6ed93dc6 1981 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1982
2b2fec4d
SJ
1983 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1984
af58925c
MH
1985 if (!data->ssp_mode)
1986 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1987
70f23020 1988 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1989 if (ie) {
af58925c
MH
1990 if (!ie->data.ssp_mode)
1991 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1992
a3d4e20a 1993 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1994 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1995 ie->data.rssi = data->rssi;
1996 hci_inquiry_cache_update_resolve(hdev, ie);
1997 }
1998
561aafbc 1999 goto update;
a3d4e20a 2000 }
561aafbc
JH
2001
2002 /* Entry not in the cache. Add new one. */
2003 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2004 if (!ie) {
2005 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2006 goto done;
2007 }
561aafbc
JH
2008
2009 list_add(&ie->all, &cache->all);
2010
2011 if (name_known) {
2012 ie->name_state = NAME_KNOWN;
2013 } else {
2014 ie->name_state = NAME_NOT_KNOWN;
2015 list_add(&ie->list, &cache->unknown);
2016 }
70f23020 2017
561aafbc
JH
2018update:
2019 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2020 ie->name_state != NAME_PENDING) {
561aafbc
JH
2021 ie->name_state = NAME_KNOWN;
2022 list_del(&ie->list);
1da177e4
LT
2023 }
2024
70f23020
AE
2025 memcpy(&ie->data, data, sizeof(*data));
2026 ie->timestamp = jiffies;
1da177e4 2027 cache->timestamp = jiffies;
3175405b
JH
2028
2029 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2030 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2031
af58925c
MH
2032done:
2033 return flags;
1da177e4
LT
2034}
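/* Example (illustrative sketch, not from the original source): how a
 * caller might act on the flags returned by hci_inquiry_cache_update().
 * MGMT_DEV_FOUND_LEGACY_PAIRING is set when SSP is unsupported and
 * MGMT_DEV_FOUND_CONFIRM_NAME when name resolution is still needed.
 * The function name is hypothetical.
 */
static void example_report_device(struct hci_dev *hdev,
				  struct inquiry_data *data, bool name_known)
{
	u32 flags = hci_inquiry_cache_update(hdev, data, name_known);

	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		BT_DBG("%s: name of %pMR needs confirmation", hdev->name,
		       &data->bdaddr);

	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
		BT_DBG("%s: %pMR supports only legacy pairing", hdev->name,
		       &data->bdaddr);
}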
2035
2036static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2037{
30883512 2038 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2039 struct inquiry_info *info = (struct inquiry_info *) buf;
2040 struct inquiry_entry *e;
2041 int copied = 0;
2042
561aafbc 2043 list_for_each_entry(e, &cache->all, all) {
1da177e4 2044 struct inquiry_data *data = &e->data;
b57c1a56
JH
2045
2046 if (copied >= num)
2047 break;
2048
1da177e4
LT
2049 bacpy(&info->bdaddr, &data->bdaddr);
2050 info->pscan_rep_mode = data->pscan_rep_mode;
2051 info->pscan_period_mode = data->pscan_period_mode;
2052 info->pscan_mode = data->pscan_mode;
2053 memcpy(info->dev_class, data->dev_class, 3);
2054 info->clock_offset = data->clock_offset;
b57c1a56 2055
1da177e4 2056 info++;
b57c1a56 2057 copied++;
1da177e4
LT
2058 }
2059
2060 BT_DBG("cache %p, copied %d", cache, copied);
2061 return copied;
2062}
2063
42c6b129 2064static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2065{
2066 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2067 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2068 struct hci_cp_inquiry cp;
2069
2070 BT_DBG("%s", hdev->name);
2071
2072 if (test_bit(HCI_INQUIRY, &hdev->flags))
2073 return;
2074
2075 /* Start Inquiry */
2076 memcpy(&cp.lap, &ir->lap, 3);
2077 cp.length = ir->length;
2078 cp.num_rsp = ir->num_rsp;
42c6b129 2079 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2080}
2081
3e13fa1e
AG
2082static int wait_inquiry(void *word)
2083{
2084 schedule();
2085 return signal_pending(current);
2086}
2087
1da177e4
LT
2088int hci_inquiry(void __user *arg)
2089{
2090 __u8 __user *ptr = arg;
2091 struct hci_inquiry_req ir;
2092 struct hci_dev *hdev;
2093 int err = 0, do_inquiry = 0, max_rsp;
2094 long timeo;
2095 __u8 *buf;
2096
2097 if (copy_from_user(&ir, ptr, sizeof(ir)))
2098 return -EFAULT;
2099
5a08ecce
AE
2100 hdev = hci_dev_get(ir.dev_id);
2101 if (!hdev)
1da177e4
LT
2102 return -ENODEV;
2103
0736cfa8
MH
2104 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2105 err = -EBUSY;
2106 goto done;
2107 }
2108
fee746b0
MH
2109 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2110 err = -EOPNOTSUPP;
2111 goto done;
2112 }
2113
5b69bef5
MH
2114 if (hdev->dev_type != HCI_BREDR) {
2115 err = -EOPNOTSUPP;
2116 goto done;
2117 }
2118
56f87901
JH
2119 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
09fd0de5 2124 hci_dev_lock(hdev);
8e87d142 2125 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2126 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2127 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2128 do_inquiry = 1;
2129 }
09fd0de5 2130 hci_dev_unlock(hdev);
1da177e4 2131
04837f64 2132 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2133
2134 if (do_inquiry) {
01178cd4
JH
2135 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2136 timeo);
70f23020
AE
2137 if (err < 0)
2138 goto done;
3e13fa1e
AG
2139
2140 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2141 * cleared). If it is interrupted by a signal, return -EINTR.
2142 */
2143 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2144 TASK_INTERRUPTIBLE))
2145 return -EINTR;
70f23020 2146 }
1da177e4 2147
8fc9ced3
GP
 2148 /* For an unlimited number of responses, use a buffer with
 2149 * 255 entries.
 2150 */
1da177e4
LT
2151 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2152
 2153 /* cache_dump can't sleep, so allocate a temporary buffer and then
 2154 * copy it to user space.
 2155 */
01df8c31 2156 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2157 if (!buf) {
1da177e4
LT
2158 err = -ENOMEM;
2159 goto done;
2160 }
2161
09fd0de5 2162 hci_dev_lock(hdev);
1da177e4 2163 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2164 hci_dev_unlock(hdev);
1da177e4
LT
2165
2166 BT_DBG("num_rsp %d", ir.num_rsp);
2167
2168 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2169 ptr += sizeof(ir);
2170 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2171 ir.num_rsp))
1da177e4 2172 err = -EFAULT;
8e87d142 2173 } else
1da177e4
LT
2174 err = -EFAULT;
2175
2176 kfree(buf);
2177
2178done:
2179 hci_dev_put(hdev);
2180 return err;
2181}
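/* Example (userspace sketch, assumptions noted): roughly what BlueZ's
 * libbluetooth hci_inquiry() helper does with the HCIINQUIRY ioctl served
 * above. "dd" is assumed to be a raw HCI socket; example_inquiry() is a
 * hypothetical name.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int dd, int dev_id)
{
	uint8_t giac[3] = { 0x33, 0x8b, 0x9e }; /* General Inquiry Access Code */
	struct hci_inquiry_req *ir;
	int num_rsp = 8, err;
	void *buf;

	/* Request header followed by room for the copied-back responses */
	buf = malloc(sizeof(*ir) + num_rsp * sizeof(inquiry_info));
	if (!buf)
		return -1;

	ir = buf;
	memset(ir, 0, sizeof(*ir));
	ir->dev_id  = dev_id;
	ir->flags   = IREQ_CACHE_FLUSH;	/* forces hci_inquiry_cache_flush() */
	ir->length  = 8;		/* inquiry length in 1.28s units */
	ir->num_rsp = num_rsp;
	memcpy(ir->lap, giac, 3);

	err = ioctl(dd, HCIINQUIRY, (unsigned long) buf);

	free(buf);
	return err;
}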
2182
cbed0ca1 2183static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2184{
1da177e4
LT
2185 int ret = 0;
2186
1da177e4
LT
2187 BT_DBG("%s %p", hdev->name, hdev);
2188
2189 hci_req_lock(hdev);
2190
94324962
JH
2191 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2192 ret = -ENODEV;
2193 goto done;
2194 }
2195
a5c8f270
MH
2196 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2197 /* Check for rfkill but allow the HCI setup stage to
2198 * proceed (which in itself doesn't cause any RF activity).
2199 */
2200 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2201 ret = -ERFKILL;
2202 goto done;
2203 }
2204
2205 /* Check for valid public address or a configured static
 2206 * random address, but let the HCI setup proceed to
2207 * be able to determine if there is a public address
2208 * or not.
2209 *
c6beca0e
MH
2210 * In case of user channel usage, it is not important
2211 * if a public address or static random address is
2212 * available.
2213 *
a5c8f270
MH
2214 * This check is only valid for BR/EDR controllers
2215 * since AMP controllers do not have an address.
2216 */
c6beca0e
MH
2217 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2218 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2219 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2220 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2221 ret = -EADDRNOTAVAIL;
2222 goto done;
2223 }
611b30f7
MH
2224 }
2225
1da177e4
LT
2226 if (test_bit(HCI_UP, &hdev->flags)) {
2227 ret = -EALREADY;
2228 goto done;
2229 }
2230
1da177e4
LT
2231 if (hdev->open(hdev)) {
2232 ret = -EIO;
2233 goto done;
2234 }
2235
f41c70c4
MH
2236 atomic_set(&hdev->cmd_cnt, 1);
2237 set_bit(HCI_INIT, &hdev->flags);
2238
2239 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2240 ret = hdev->setup(hdev);
2241
2242 if (!ret) {
fee746b0 2243 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
0736cfa8 2244 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2245 ret = __hci_init(hdev);
1da177e4
LT
2246 }
2247
f41c70c4
MH
2248 clear_bit(HCI_INIT, &hdev->flags);
2249
1da177e4
LT
2250 if (!ret) {
2251 hci_dev_hold(hdev);
d6bfd59c 2252 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2253 set_bit(HCI_UP, &hdev->flags);
2254 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2255 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 2256 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2257 hdev->dev_type == HCI_BREDR) {
09fd0de5 2258 hci_dev_lock(hdev);
744cf19e 2259 mgmt_powered(hdev, 1);
09fd0de5 2260 hci_dev_unlock(hdev);
56e5cb86 2261 }
8e87d142 2262 } else {
1da177e4 2263 /* Init failed, cleanup */
3eff45ea 2264 flush_work(&hdev->tx_work);
c347b765 2265 flush_work(&hdev->cmd_work);
b78752cc 2266 flush_work(&hdev->rx_work);
1da177e4
LT
2267
2268 skb_queue_purge(&hdev->cmd_q);
2269 skb_queue_purge(&hdev->rx_q);
2270
2271 if (hdev->flush)
2272 hdev->flush(hdev);
2273
2274 if (hdev->sent_cmd) {
2275 kfree_skb(hdev->sent_cmd);
2276 hdev->sent_cmd = NULL;
2277 }
2278
2279 hdev->close(hdev);
fee746b0 2280 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2281 }
2282
2283done:
2284 hci_req_unlock(hdev);
1da177e4
LT
2285 return ret;
2286}
2287
cbed0ca1
JH
2288/* ---- HCI ioctl helpers ---- */
2289
2290int hci_dev_open(__u16 dev)
2291{
2292 struct hci_dev *hdev;
2293 int err;
2294
2295 hdev = hci_dev_get(dev);
2296 if (!hdev)
2297 return -ENODEV;
2298
fee746b0
MH
2299 /* Devices that are marked for raw-only usage can only be powered
2300 * up as user channel. Trying to bring them up as normal devices
 2301 * will result in a failure. Only user channel operation is
2302 * possible.
2303 *
2304 * When this function is called for a user channel, the flag
2305 * HCI_USER_CHANNEL will be set first before attempting to
2306 * open the device.
2307 */
2308 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2309 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2310 err = -EOPNOTSUPP;
2311 goto done;
2312 }
2313
e1d08f40
JH
2314 /* We need to ensure that no other power on/off work is pending
2315 * before proceeding to call hci_dev_do_open. This is
2316 * particularly important if the setup procedure has not yet
2317 * completed.
2318 */
2319 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2320 cancel_delayed_work(&hdev->power_off);
2321
a5c8f270
MH
2322 /* After this call it is guaranteed that the setup procedure
2323 * has finished. This means that error conditions like RFKILL
2324 * or no valid public or static random address apply.
2325 */
e1d08f40
JH
2326 flush_workqueue(hdev->req_workqueue);
2327
cbed0ca1
JH
2328 err = hci_dev_do_open(hdev);
2329
fee746b0 2330done:
cbed0ca1 2331 hci_dev_put(hdev);
cbed0ca1
JH
2332 return err;
2333}
2334
1da177e4
LT
2335static int hci_dev_do_close(struct hci_dev *hdev)
2336{
2337 BT_DBG("%s %p", hdev->name, hdev);
2338
78c04c0b
VCG
2339 cancel_delayed_work(&hdev->power_off);
2340
1da177e4
LT
2341 hci_req_cancel(hdev, ENODEV);
2342 hci_req_lock(hdev);
2343
2344 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2345 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2346 hci_req_unlock(hdev);
2347 return 0;
2348 }
2349
3eff45ea
GP
2350 /* Flush RX and TX works */
2351 flush_work(&hdev->tx_work);
b78752cc 2352 flush_work(&hdev->rx_work);
1da177e4 2353
16ab91ab 2354 if (hdev->discov_timeout > 0) {
e0f9309f 2355 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2356 hdev->discov_timeout = 0;
5e5282bb 2357 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2358 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2359 }
2360
a8b2d5c2 2361 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2362 cancel_delayed_work(&hdev->service_cache);
2363
7ba8b4be 2364 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2365
2366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2367 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2368
09fd0de5 2369 hci_dev_lock(hdev);
1f9b9a5d 2370 hci_inquiry_cache_flush(hdev);
1da177e4 2371 hci_conn_hash_flush(hdev);
6046dc3e 2372 hci_pend_le_conns_clear(hdev);
09fd0de5 2373 hci_dev_unlock(hdev);
1da177e4
LT
2374
2375 hci_notify(hdev, HCI_DEV_DOWN);
2376
2377 if (hdev->flush)
2378 hdev->flush(hdev);
2379
2380 /* Reset device */
2381 skb_queue_purge(&hdev->cmd_q);
2382 atomic_set(&hdev->cmd_cnt, 1);
fee746b0 2383 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
3a6afbd2 2384 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2385 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2386 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2387 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2388 clear_bit(HCI_INIT, &hdev->flags);
2389 }
2390
c347b765
GP
2391 /* flush cmd work */
2392 flush_work(&hdev->cmd_work);
1da177e4
LT
2393
2394 /* Drop queues */
2395 skb_queue_purge(&hdev->rx_q);
2396 skb_queue_purge(&hdev->cmd_q);
2397 skb_queue_purge(&hdev->raw_q);
2398
2399 /* Drop last sent command */
2400 if (hdev->sent_cmd) {
65cc2b49 2401 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2402 kfree_skb(hdev->sent_cmd);
2403 hdev->sent_cmd = NULL;
2404 }
2405
b6ddb638
JH
2406 kfree_skb(hdev->recv_evt);
2407 hdev->recv_evt = NULL;
2408
1da177e4
LT
2409 /* After this point our queues are empty
2410 * and no tasks are scheduled. */
2411 hdev->close(hdev);
2412
35b973c9 2413 /* Clear flags */
fee746b0 2414 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2415 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2416
93c311a0
MH
2417 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2418 if (hdev->dev_type == HCI_BREDR) {
2419 hci_dev_lock(hdev);
2420 mgmt_powered(hdev, 0);
2421 hci_dev_unlock(hdev);
2422 }
8ee56540 2423 }
5add6af8 2424
ced5c338 2425 /* Controller radio is available but is currently powered down */
536619e8 2426 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2427
e59fda8d 2428 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2429 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2430 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2431
1da177e4
LT
2432 hci_req_unlock(hdev);
2433
2434 hci_dev_put(hdev);
2435 return 0;
2436}
2437
2438int hci_dev_close(__u16 dev)
2439{
2440 struct hci_dev *hdev;
2441 int err;
2442
70f23020
AE
2443 hdev = hci_dev_get(dev);
2444 if (!hdev)
1da177e4 2445 return -ENODEV;
8ee56540 2446
0736cfa8
MH
2447 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2448 err = -EBUSY;
2449 goto done;
2450 }
2451
8ee56540
MH
2452 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2453 cancel_delayed_work(&hdev->power_off);
2454
1da177e4 2455 err = hci_dev_do_close(hdev);
8ee56540 2456
0736cfa8 2457done:
1da177e4
LT
2458 hci_dev_put(hdev);
2459 return err;
2460}
2461
2462int hci_dev_reset(__u16 dev)
2463{
2464 struct hci_dev *hdev;
2465 int ret = 0;
2466
70f23020
AE
2467 hdev = hci_dev_get(dev);
2468 if (!hdev)
1da177e4
LT
2469 return -ENODEV;
2470
2471 hci_req_lock(hdev);
1da177e4 2472
808a049e
MH
2473 if (!test_bit(HCI_UP, &hdev->flags)) {
2474 ret = -ENETDOWN;
1da177e4 2475 goto done;
808a049e 2476 }
1da177e4 2477
0736cfa8
MH
2478 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479 ret = -EBUSY;
2480 goto done;
2481 }
2482
fee746b0
MH
2483 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2484 ret = -EOPNOTSUPP;
2485 goto done;
2486 }
2487
1da177e4
LT
2488 /* Drop queues */
2489 skb_queue_purge(&hdev->rx_q);
2490 skb_queue_purge(&hdev->cmd_q);
2491
09fd0de5 2492 hci_dev_lock(hdev);
1f9b9a5d 2493 hci_inquiry_cache_flush(hdev);
1da177e4 2494 hci_conn_hash_flush(hdev);
09fd0de5 2495 hci_dev_unlock(hdev);
1da177e4
LT
2496
2497 if (hdev->flush)
2498 hdev->flush(hdev);
2499
8e87d142 2500 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2501 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2502
fee746b0 2503 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2504
2505done:
1da177e4
LT
2506 hci_req_unlock(hdev);
2507 hci_dev_put(hdev);
2508 return ret;
2509}
2510
2511int hci_dev_reset_stat(__u16 dev)
2512{
2513 struct hci_dev *hdev;
2514 int ret = 0;
2515
70f23020
AE
2516 hdev = hci_dev_get(dev);
2517 if (!hdev)
1da177e4
LT
2518 return -ENODEV;
2519
0736cfa8
MH
2520 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2521 ret = -EBUSY;
2522 goto done;
2523 }
2524
fee746b0
MH
2525 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2526 ret = -EOPNOTSUPP;
2527 goto done;
2528 }
2529
1da177e4
LT
2530 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2531
0736cfa8 2532done:
1da177e4 2533 hci_dev_put(hdev);
1da177e4
LT
2534 return ret;
2535}
2536
2537int hci_dev_cmd(unsigned int cmd, void __user *arg)
2538{
2539 struct hci_dev *hdev;
2540 struct hci_dev_req dr;
2541 int err = 0;
2542
2543 if (copy_from_user(&dr, arg, sizeof(dr)))
2544 return -EFAULT;
2545
70f23020
AE
2546 hdev = hci_dev_get(dr.dev_id);
2547 if (!hdev)
1da177e4
LT
2548 return -ENODEV;
2549
0736cfa8
MH
2550 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2551 err = -EBUSY;
2552 goto done;
2553 }
2554
fee746b0
MH
2555 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2556 err = -EOPNOTSUPP;
2557 goto done;
2558 }
2559
5b69bef5
MH
2560 if (hdev->dev_type != HCI_BREDR) {
2561 err = -EOPNOTSUPP;
2562 goto done;
2563 }
2564
56f87901
JH
2565 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2566 err = -EOPNOTSUPP;
2567 goto done;
2568 }
2569
1da177e4
LT
2570 switch (cmd) {
2571 case HCISETAUTH:
01178cd4
JH
2572 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2573 HCI_INIT_TIMEOUT);
1da177e4
LT
2574 break;
2575
2576 case HCISETENCRYPT:
2577 if (!lmp_encrypt_capable(hdev)) {
2578 err = -EOPNOTSUPP;
2579 break;
2580 }
2581
2582 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2583 /* Auth must be enabled first */
01178cd4
JH
2584 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
1da177e4
LT
2586 if (err)
2587 break;
2588 }
2589
01178cd4
JH
2590 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2591 HCI_INIT_TIMEOUT);
1da177e4
LT
2592 break;
2593
2594 case HCISETSCAN:
01178cd4
JH
2595 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2596 HCI_INIT_TIMEOUT);
1da177e4
LT
2597 break;
2598
1da177e4 2599 case HCISETLINKPOL:
01178cd4
JH
2600 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2601 HCI_INIT_TIMEOUT);
1da177e4
LT
2602 break;
2603
2604 case HCISETLINKMODE:
e4e8e37c
MH
2605 hdev->link_mode = ((__u16) dr.dev_opt) &
2606 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2607 break;
2608
2609 case HCISETPTYPE:
2610 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2611 break;
2612
2613 case HCISETACLMTU:
e4e8e37c
MH
2614 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2615 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2616 break;
2617
2618 case HCISETSCOMTU:
e4e8e37c
MH
2619 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2620 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2621 break;
2622
2623 default:
2624 err = -EINVAL;
2625 break;
2626 }
e4e8e37c 2627
0736cfa8 2628done:
1da177e4
LT
2629 hci_dev_put(hdev);
2630 return err;
2631}
2632
2633int hci_get_dev_list(void __user *arg)
2634{
8035ded4 2635 struct hci_dev *hdev;
1da177e4
LT
2636 struct hci_dev_list_req *dl;
2637 struct hci_dev_req *dr;
1da177e4
LT
2638 int n = 0, size, err;
2639 __u16 dev_num;
2640
2641 if (get_user(dev_num, (__u16 __user *) arg))
2642 return -EFAULT;
2643
2644 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2645 return -EINVAL;
2646
2647 size = sizeof(*dl) + dev_num * sizeof(*dr);
2648
70f23020
AE
2649 dl = kzalloc(size, GFP_KERNEL);
2650 if (!dl)
1da177e4
LT
2651 return -ENOMEM;
2652
2653 dr = dl->dev_req;
2654
f20d09d5 2655 read_lock(&hci_dev_list_lock);
8035ded4 2656 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2657 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2658 cancel_delayed_work(&hdev->power_off);
c542a06c 2659
a8b2d5c2
JH
2660 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2661 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2662
1da177e4
LT
2663 (dr + n)->dev_id = hdev->id;
2664 (dr + n)->dev_opt = hdev->flags;
c542a06c 2665
1da177e4
LT
2666 if (++n >= dev_num)
2667 break;
2668 }
f20d09d5 2669 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2670
2671 dl->dev_num = n;
2672 size = sizeof(*dl) + n * sizeof(*dr);
2673
2674 err = copy_to_user(arg, dl, size);
2675 kfree(dl);
2676
2677 return err ? -EFAULT : 0;
2678}
2679
2680int hci_get_dev_info(void __user *arg)
2681{
2682 struct hci_dev *hdev;
2683 struct hci_dev_info di;
2684 int err = 0;
2685
2686 if (copy_from_user(&di, arg, sizeof(di)))
2687 return -EFAULT;
2688
70f23020
AE
2689 hdev = hci_dev_get(di.dev_id);
2690 if (!hdev)
1da177e4
LT
2691 return -ENODEV;
2692
a8b2d5c2 2693 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2694 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2695
a8b2d5c2
JH
2696 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2697 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2698
1da177e4
LT
2699 strcpy(di.name, hdev->name);
2700 di.bdaddr = hdev->bdaddr;
60f2a3ed 2701 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2702 di.flags = hdev->flags;
2703 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2704 if (lmp_bredr_capable(hdev)) {
2705 di.acl_mtu = hdev->acl_mtu;
2706 di.acl_pkts = hdev->acl_pkts;
2707 di.sco_mtu = hdev->sco_mtu;
2708 di.sco_pkts = hdev->sco_pkts;
2709 } else {
2710 di.acl_mtu = hdev->le_mtu;
2711 di.acl_pkts = hdev->le_pkts;
2712 di.sco_mtu = 0;
2713 di.sco_pkts = 0;
2714 }
1da177e4
LT
2715 di.link_policy = hdev->link_policy;
2716 di.link_mode = hdev->link_mode;
2717
2718 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2719 memcpy(&di.features, &hdev->features, sizeof(di.features));
2720
2721 if (copy_to_user(arg, &di, sizeof(di)))
2722 err = -EFAULT;
2723
2724 hci_dev_put(hdev);
2725
2726 return err;
2727}
2728
2729/* ---- Interface to HCI drivers ---- */
2730
611b30f7
MH
2731static int hci_rfkill_set_block(void *data, bool blocked)
2732{
2733 struct hci_dev *hdev = data;
2734
2735 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2736
0736cfa8
MH
2737 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2738 return -EBUSY;
2739
5e130367
JH
2740 if (blocked) {
2741 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2742 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2743 hci_dev_do_close(hdev);
5e130367
JH
2744 } else {
2745 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2746 }
611b30f7
MH
2747
2748 return 0;
2749}
2750
2751static const struct rfkill_ops hci_rfkill_ops = {
2752 .set_block = hci_rfkill_set_block,
2753};
2754
ab81cbf9
JH
2755static void hci_power_on(struct work_struct *work)
2756{
2757 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2758 int err;
ab81cbf9
JH
2759
2760 BT_DBG("%s", hdev->name);
2761
cbed0ca1 2762 err = hci_dev_do_open(hdev);
96570ffc
JH
2763 if (err < 0) {
2764 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2765 return;
96570ffc 2766 }
ab81cbf9 2767
a5c8f270
MH
2768 /* During the HCI setup phase, a few error conditions are
2769 * ignored and they need to be checked now. If they are still
2770 * valid, it is important to turn the device back off.
2771 */
2772 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2773 (hdev->dev_type == HCI_BREDR &&
2774 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2775 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2776 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2777 hci_dev_do_close(hdev);
2778 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2779 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2780 HCI_AUTO_OFF_TIMEOUT);
bf543036 2781 }
ab81cbf9 2782
fee746b0
MH
2783 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2784 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2785 mgmt_index_added(hdev);
2786 }
ab81cbf9
JH
2787}
2788
2789static void hci_power_off(struct work_struct *work)
2790{
3243553f 2791 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2792 power_off.work);
ab81cbf9
JH
2793
2794 BT_DBG("%s", hdev->name);
2795
8ee56540 2796 hci_dev_do_close(hdev);
ab81cbf9
JH
2797}
2798
16ab91ab
JH
2799static void hci_discov_off(struct work_struct *work)
2800{
2801 struct hci_dev *hdev;
16ab91ab
JH
2802
2803 hdev = container_of(work, struct hci_dev, discov_off.work);
2804
2805 BT_DBG("%s", hdev->name);
2806
d1967ff8 2807 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2808}
2809
35f7498a 2810void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2811{
4821002c 2812 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2813
4821002c
JH
2814 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2815 list_del(&uuid->list);
2aeb9a1a
JH
2816 kfree(uuid);
2817 }
2aeb9a1a
JH
2818}
2819
35f7498a 2820void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2821{
2822 struct list_head *p, *n;
2823
2824 list_for_each_safe(p, n, &hdev->link_keys) {
2825 struct link_key *key;
2826
2827 key = list_entry(p, struct link_key, list);
2828
2829 list_del(p);
2830 kfree(key);
2831 }
55ed8ca1
JH
2832}
2833
35f7498a 2834void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2835{
2836 struct smp_ltk *k, *tmp;
2837
2838 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2839 list_del(&k->list);
2840 kfree(k);
2841 }
b899efaf
VCG
2842}
2843
970c4e46
JH
2844void hci_smp_irks_clear(struct hci_dev *hdev)
2845{
2846 struct smp_irk *k, *tmp;
2847
2848 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2849 list_del(&k->list);
2850 kfree(k);
2851 }
2852}
2853
55ed8ca1
JH
2854struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2855{
8035ded4 2856 struct link_key *k;
55ed8ca1 2857
8035ded4 2858 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2859 if (bacmp(bdaddr, &k->bdaddr) == 0)
2860 return k;
55ed8ca1
JH
2861
2862 return NULL;
2863}
2864
745c0ce3 2865static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2866 u8 key_type, u8 old_key_type)
d25e28ab
JH
2867{
2868 /* Legacy key */
2869 if (key_type < 0x03)
745c0ce3 2870 return true;
d25e28ab
JH
2871
2872 /* Debug keys are insecure so don't store them persistently */
2873 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2874 return false;
d25e28ab
JH
2875
2876 /* Changed combination key and there's no previous one */
2877 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2878 return false;
d25e28ab
JH
2879
2880 /* Security mode 3 case */
2881 if (!conn)
745c0ce3 2882 return true;
d25e28ab
JH
2883
 2884 /* Neither the local nor the remote side had no-bonding as a requirement */
2885 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2886 return true;
d25e28ab
JH
2887
2888 /* Local side had dedicated bonding as requirement */
2889 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2890 return true;
d25e28ab
JH
2891
2892 /* Remote side had dedicated bonding as requirement */
2893 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2894 return true;
d25e28ab
JH
2895
2896 /* If none of the above criteria match, then don't store the key
2897 * persistently */
745c0ce3 2898 return false;
d25e28ab
JH
2899}
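/* Condensed view of the hci_persistent_key() decision above (added for
 * clarity; the ordering matches the checks in the function):
 *
 *   key_type < 0x03 (legacy key)                   -> store
 *   HCI_LK_DEBUG_COMBINATION                       -> never store
 *   HCI_LK_CHANGED_COMBINATION with no old key     -> don't store
 *   no connection (security mode 3)                -> store
 *   both sides requested some form of bonding      -> store
 *   either side requested dedicated bonding        -> store
 *   anything else                                  -> don't store
 */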
2900
98a0b845
JH
2901static bool ltk_type_master(u8 type)
2902{
d97c9fb0 2903 return (type == SMP_LTK);
98a0b845
JH
2904}
2905
fe39c7b2 2906struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2907 bool master)
75d262c2 2908{
c9839a11 2909 struct smp_ltk *k;
75d262c2 2910
c9839a11 2911 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2912 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2913 continue;
2914
98a0b845
JH
2915 if (ltk_type_master(k->type) != master)
2916 continue;
2917
c9839a11 2918 return k;
75d262c2
VCG
2919 }
2920
2921 return NULL;
2922}
75d262c2 2923
c9839a11 2924struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2925 u8 addr_type, bool master)
75d262c2 2926{
c9839a11 2927 struct smp_ltk *k;
75d262c2 2928
c9839a11
VCG
2929 list_for_each_entry(k, &hdev->long_term_keys, list)
2930 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2931 bacmp(bdaddr, &k->bdaddr) == 0 &&
2932 ltk_type_master(k->type) == master)
75d262c2
VCG
2933 return k;
2934
2935 return NULL;
2936}
75d262c2 2937
970c4e46
JH
2938struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2939{
2940 struct smp_irk *irk;
2941
2942 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2943 if (!bacmp(&irk->rpa, rpa))
2944 return irk;
2945 }
2946
2947 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2948 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2949 bacpy(&irk->rpa, rpa);
2950 return irk;
2951 }
2952 }
2953
2954 return NULL;
2955}
2956
2957struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2958 u8 addr_type)
2959{
2960 struct smp_irk *irk;
2961
6cfc9988
JH
2962 /* Identity Address must be public or static random */
2963 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2964 return NULL;
2965
970c4e46
JH
2966 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2967 if (addr_type == irk->addr_type &&
2968 bacmp(bdaddr, &irk->bdaddr) == 0)
2969 return irk;
2970 }
2971
2972 return NULL;
2973}
2974
567fa2aa 2975struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2976 bdaddr_t *bdaddr, u8 *val, u8 type,
2977 u8 pin_len, bool *persistent)
55ed8ca1
JH
2978{
2979 struct link_key *key, *old_key;
745c0ce3 2980 u8 old_key_type;
55ed8ca1
JH
2981
2982 old_key = hci_find_link_key(hdev, bdaddr);
2983 if (old_key) {
2984 old_key_type = old_key->type;
2985 key = old_key;
2986 } else {
12adcf3a 2987 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2988 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2989 if (!key)
567fa2aa 2990 return NULL;
55ed8ca1
JH
2991 list_add(&key->list, &hdev->link_keys);
2992 }
2993
6ed93dc6 2994 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2995
d25e28ab
JH
2996 /* Some buggy controller combinations generate a changed
2997 * combination key for legacy pairing even when there's no
2998 * previous key */
2999 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3000 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3001 type = HCI_LK_COMBINATION;
655fe6ec
JH
3002 if (conn)
3003 conn->key_type = type;
3004 }
d25e28ab 3005
55ed8ca1 3006 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3007 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3008 key->pin_len = pin_len;
3009
b6020ba0 3010 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3011 key->type = old_key_type;
4748fed2
JH
3012 else
3013 key->type = type;
3014
7652ff6a
JH
3015 if (persistent)
3016 *persistent = hci_persistent_key(hdev, conn, type,
3017 old_key_type);
55ed8ca1 3018
567fa2aa 3019 return key;
55ed8ca1
JH
3020}
3021
ca9142b8 3022struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3023 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3024 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3025{
c9839a11 3026 struct smp_ltk *key, *old_key;
98a0b845 3027 bool master = ltk_type_master(type);
75d262c2 3028
98a0b845 3029 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3030 if (old_key)
75d262c2 3031 key = old_key;
c9839a11 3032 else {
0a14ab41 3033 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3034 if (!key)
ca9142b8 3035 return NULL;
c9839a11 3036 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3037 }
3038
75d262c2 3039 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3040 key->bdaddr_type = addr_type;
3041 memcpy(key->val, tk, sizeof(key->val));
3042 key->authenticated = authenticated;
3043 key->ediv = ediv;
fe39c7b2 3044 key->rand = rand;
c9839a11
VCG
3045 key->enc_size = enc_size;
3046 key->type = type;
75d262c2 3047
ca9142b8 3048 return key;
75d262c2
VCG
3049}
3050
ca9142b8
JH
3051struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3052 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3053{
3054 struct smp_irk *irk;
3055
3056 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3057 if (!irk) {
3058 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3059 if (!irk)
ca9142b8 3060 return NULL;
970c4e46
JH
3061
3062 bacpy(&irk->bdaddr, bdaddr);
3063 irk->addr_type = addr_type;
3064
3065 list_add(&irk->list, &hdev->identity_resolving_keys);
3066 }
3067
3068 memcpy(irk->val, val, 16);
3069 bacpy(&irk->rpa, rpa);
3070
ca9142b8 3071 return irk;
970c4e46
JH
3072}
3073
55ed8ca1
JH
3074int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3075{
3076 struct link_key *key;
3077
3078 key = hci_find_link_key(hdev, bdaddr);
3079 if (!key)
3080 return -ENOENT;
3081
6ed93dc6 3082 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3083
3084 list_del(&key->list);
3085 kfree(key);
3086
3087 return 0;
3088}
3089
e0b2b27e 3090int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3091{
3092 struct smp_ltk *k, *tmp;
c51ffa0b 3093 int removed = 0;
b899efaf
VCG
3094
3095 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3096 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3097 continue;
3098
6ed93dc6 3099 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3100
3101 list_del(&k->list);
3102 kfree(k);
c51ffa0b 3103 removed++;
b899efaf
VCG
3104 }
3105
c51ffa0b 3106 return removed ? 0 : -ENOENT;
b899efaf
VCG
3107}
3108
a7ec7338
JH
3109void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3110{
3111 struct smp_irk *k, *tmp;
3112
668b7b19 3113 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3114 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3115 continue;
3116
3117 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3118
3119 list_del(&k->list);
3120 kfree(k);
3121 }
3122}
3123
6bd32326 3124/* HCI command timer function */
65cc2b49 3125static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3126{
65cc2b49
MH
3127 struct hci_dev *hdev = container_of(work, struct hci_dev,
3128 cmd_timer.work);
6bd32326 3129
bda4f23a
AE
3130 if (hdev->sent_cmd) {
3131 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3132 u16 opcode = __le16_to_cpu(sent->opcode);
3133
3134 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3135 } else {
3136 BT_ERR("%s command tx timeout", hdev->name);
3137 }
3138
6bd32326 3139 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3140 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3141}
3142
2763eda6 3143struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3144 bdaddr_t *bdaddr)
2763eda6
SJ
3145{
3146 struct oob_data *data;
3147
3148 list_for_each_entry(data, &hdev->remote_oob_data, list)
3149 if (bacmp(bdaddr, &data->bdaddr) == 0)
3150 return data;
3151
3152 return NULL;
3153}
3154
3155int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3156{
3157 struct oob_data *data;
3158
3159 data = hci_find_remote_oob_data(hdev, bdaddr);
3160 if (!data)
3161 return -ENOENT;
3162
6ed93dc6 3163 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3164
3165 list_del(&data->list);
3166 kfree(data);
3167
3168 return 0;
3169}
3170
35f7498a 3171void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3172{
3173 struct oob_data *data, *n;
3174
3175 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3176 list_del(&data->list);
3177 kfree(data);
3178 }
2763eda6
SJ
3179}
3180
0798872e
MH
3181int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3182 u8 *hash, u8 *randomizer)
2763eda6
SJ
3183{
3184 struct oob_data *data;
3185
3186 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3187 if (!data) {
0a14ab41 3188 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3189 if (!data)
3190 return -ENOMEM;
3191
3192 bacpy(&data->bdaddr, bdaddr);
3193 list_add(&data->list, &hdev->remote_oob_data);
3194 }
3195
519ca9d0
MH
3196 memcpy(data->hash192, hash, sizeof(data->hash192));
3197 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3198
0798872e
MH
3199 memset(data->hash256, 0, sizeof(data->hash256));
3200 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3201
3202 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3203
3204 return 0;
3205}
3206
3207int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3208 u8 *hash192, u8 *randomizer192,
3209 u8 *hash256, u8 *randomizer256)
3210{
3211 struct oob_data *data;
3212
3213 data = hci_find_remote_oob_data(hdev, bdaddr);
3214 if (!data) {
0a14ab41 3215 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3216 if (!data)
3217 return -ENOMEM;
3218
3219 bacpy(&data->bdaddr, bdaddr);
3220 list_add(&data->list, &hdev->remote_oob_data);
3221 }
3222
3223 memcpy(data->hash192, hash192, sizeof(data->hash192));
3224 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3225
3226 memcpy(data->hash256, hash256, sizeof(data->hash256));
3227 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3228
6ed93dc6 3229 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3230
3231 return 0;
3232}
3233
b9ee0a78
MH
3234struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3235 bdaddr_t *bdaddr, u8 type)
b2a66aad 3236{
8035ded4 3237 struct bdaddr_list *b;
b2a66aad 3238
b9ee0a78
MH
3239 list_for_each_entry(b, &hdev->blacklist, list) {
3240 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3241 return b;
b9ee0a78 3242 }
b2a66aad
AJ
3243
3244 return NULL;
3245}
3246
c9507490 3247static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3248{
3249 struct list_head *p, *n;
3250
3251 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3252 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3253
3254 list_del(p);
3255 kfree(b);
3256 }
b2a66aad
AJ
3257}
3258
88c1fe4b 3259int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3260{
3261 struct bdaddr_list *entry;
b2a66aad 3262
b9ee0a78 3263 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3264 return -EBADF;
3265
b9ee0a78 3266 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3267 return -EEXIST;
b2a66aad
AJ
3268
3269 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3270 if (!entry)
3271 return -ENOMEM;
b2a66aad
AJ
3272
3273 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3274 entry->bdaddr_type = type;
b2a66aad
AJ
3275
3276 list_add(&entry->list, &hdev->blacklist);
3277
88c1fe4b 3278 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3279}
3280
88c1fe4b 3281int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3282{
3283 struct bdaddr_list *entry;
b2a66aad 3284
35f7498a
JH
3285 if (!bacmp(bdaddr, BDADDR_ANY)) {
3286 hci_blacklist_clear(hdev);
3287 return 0;
3288 }
b2a66aad 3289
b9ee0a78 3290 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3291 if (!entry)
5e762444 3292 return -ENOENT;
b2a66aad
AJ
3293
3294 list_del(&entry->list);
3295 kfree(entry);
3296
88c1fe4b 3297 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3298}
3299
d2ab0ac1
MH
3300struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3301 bdaddr_t *bdaddr, u8 type)
3302{
3303 struct bdaddr_list *b;
3304
3305 list_for_each_entry(b, &hdev->le_white_list, list) {
3306 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3307 return b;
3308 }
3309
3310 return NULL;
3311}
3312
3313void hci_white_list_clear(struct hci_dev *hdev)
3314{
3315 struct list_head *p, *n;
3316
3317 list_for_each_safe(p, n, &hdev->le_white_list) {
3318 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3319
3320 list_del(p);
3321 kfree(b);
3322 }
3323}
3324
3325int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3326{
3327 struct bdaddr_list *entry;
3328
3329 if (!bacmp(bdaddr, BDADDR_ANY))
3330 return -EBADF;
3331
3332 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3333 if (!entry)
3334 return -ENOMEM;
3335
3336 bacpy(&entry->bdaddr, bdaddr);
3337 entry->bdaddr_type = type;
3338
3339 list_add(&entry->list, &hdev->le_white_list);
3340
3341 return 0;
3342}
3343
3344int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3345{
3346 struct bdaddr_list *entry;
3347
3348 if (!bacmp(bdaddr, BDADDR_ANY))
3349 return -EBADF;
3350
3351 entry = hci_white_list_lookup(hdev, bdaddr, type);
3352 if (!entry)
3353 return -ENOENT;
3354
3355 list_del(&entry->list);
3356 kfree(entry);
3357
3358 return 0;
3359}
3360
15819a70
AG
3361/* This function requires the caller holds hdev->lock */
3362struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3363 bdaddr_t *addr, u8 addr_type)
3364{
3365 struct hci_conn_params *params;
3366
3367 list_for_each_entry(params, &hdev->le_conn_params, list) {
3368 if (bacmp(&params->addr, addr) == 0 &&
3369 params->addr_type == addr_type) {
3370 return params;
3371 }
3372 }
3373
3374 return NULL;
3375}
3376
cef952ce
AG
3377static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3378{
3379 struct hci_conn *conn;
3380
3381 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3382 if (!conn)
3383 return false;
3384
3385 if (conn->dst_type != type)
3386 return false;
3387
3388 if (conn->state != BT_CONNECTED)
3389 return false;
3390
3391 return true;
3392}
3393
a9b0a04c
AG
3394static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3395{
3396 if (addr_type == ADDR_LE_DEV_PUBLIC)
3397 return true;
3398
3399 /* Check for Random Static address type */
3400 if ((addr->b[5] & 0xc0) == 0xc0)
3401 return true;
3402
3403 return false;
3404}
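/* Illustrative note (not from the original source): bdaddr_t is stored
 * little-endian, so b[5] is the most significant byte. A static random
 * address must have its two top bits set to 11, e.g. C0:11:22:33:44:55
 * qualifies, while 40:11:22:33:44:55 (a resolvable private address
 * pattern) does not.
 */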
3405
4b10966f
MH
3406/* This function requires the caller holds hdev->lock */
3407struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3408 bdaddr_t *addr, u8 addr_type)
3409{
3410 struct bdaddr_list *entry;
3411
3412 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3413 if (bacmp(&entry->bdaddr, addr) == 0 &&
3414 entry->bdaddr_type == addr_type)
3415 return entry;
3416 }
3417
3418 return NULL;
3419}
3420
3421/* This function requires the caller holds hdev->lock */
3422void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3423{
3424 struct bdaddr_list *entry;
3425
3426 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3427 if (entry)
3428 goto done;
3429
3430 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3431 if (!entry) {
3432 BT_ERR("Out of memory");
3433 return;
3434 }
3435
3436 bacpy(&entry->bdaddr, addr);
3437 entry->bdaddr_type = addr_type;
3438
3439 list_add(&entry->list, &hdev->pend_le_conns);
3440
3441 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3442
3443done:
3444 hci_update_background_scan(hdev);
3445}
3446
3447/* This function requires the caller holds hdev->lock */
3448void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3449{
3450 struct bdaddr_list *entry;
3451
3452 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3453 if (!entry)
3454 goto done;
3455
3456 list_del(&entry->list);
3457 kfree(entry);
3458
3459 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3460
3461done:
3462 hci_update_background_scan(hdev);
3463}
3464
3465/* This function requires the caller holds hdev->lock */
3466void hci_pend_le_conns_clear(struct hci_dev *hdev)
3467{
3468 struct bdaddr_list *entry, *tmp;
3469
3470 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3471 list_del(&entry->list);
3472 kfree(entry);
3473 }
3474
3475 BT_DBG("All LE pending connections cleared");
1c1697c0
MH
3476
3477 hci_update_background_scan(hdev);
4b10966f
MH
3478}
3479
15819a70 3480/* This function requires the caller holds hdev->lock */
51d167c0
MH
3481struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3482 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3483{
3484 struct hci_conn_params *params;
3485
3486 if (!is_identity_address(addr, addr_type))
51d167c0 3487 return NULL;
bf5b3c8b
MH
3488
3489 params = hci_conn_params_lookup(hdev, addr, addr_type);
3490 if (params)
51d167c0 3491 return params;
bf5b3c8b
MH
3492
3493 params = kzalloc(sizeof(*params), GFP_KERNEL);
3494 if (!params) {
3495 BT_ERR("Out of memory");
51d167c0 3496 return NULL;
bf5b3c8b
MH
3497 }
3498
3499 bacpy(&params->addr, addr);
3500 params->addr_type = addr_type;
3501
3502 list_add(&params->list, &hdev->le_conn_params);
3503
3504 params->conn_min_interval = hdev->le_conn_min_interval;
3505 params->conn_max_interval = hdev->le_conn_max_interval;
3506 params->conn_latency = hdev->le_conn_latency;
3507 params->supervision_timeout = hdev->le_supv_timeout;
3508 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3509
3510 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3511
51d167c0 3512 return params;
bf5b3c8b
MH
3513}
3514
3515/* This function requires the caller holds hdev->lock */
3516int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3517 u8 auto_connect)
15819a70
AG
3518{
3519 struct hci_conn_params *params;
3520
8c87aae1
MH
3521 params = hci_conn_params_add(hdev, addr, addr_type);
3522 if (!params)
3523 return -EIO;
cef952ce 3524
9fcb18ef 3525 params->auto_connect = auto_connect;
15819a70 3526
cef952ce
AG
3527 switch (auto_connect) {
3528 case HCI_AUTO_CONN_DISABLED:
3529 case HCI_AUTO_CONN_LINK_LOSS:
3530 hci_pend_le_conn_del(hdev, addr, addr_type);
3531 break;
3532 case HCI_AUTO_CONN_ALWAYS:
3533 if (!is_connected(hdev, addr, addr_type))
3534 hci_pend_le_conn_add(hdev, addr, addr_type);
3535 break;
3536 }
15819a70 3537
d06b50ce
MH
3538 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3539 auto_connect);
a9b0a04c
AG
3540
3541 return 0;
15819a70
AG
3542}
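/* Example (illustrative sketch, not from the original source): a caller
 * of hci_conn_params_set(). It must run under hdev->lock, and
 * HCI_AUTO_CONN_ALWAYS also queues the address on the pending-connection
 * list so the background scan can re-establish the link. The function
 * name is hypothetical and the address is assumed to be a public
 * identity address.
 */
static void example_enable_auto_connect(struct hci_dev *hdev, bdaddr_t *addr)
{
	hci_dev_lock(hdev);

	if (hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				HCI_AUTO_CONN_ALWAYS) < 0)
		BT_ERR("Failed to set connection parameters");

	hci_dev_unlock(hdev);
}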
3543
3544/* This function requires the caller holds hdev->lock */
3545void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3546{
3547 struct hci_conn_params *params;
3548
3549 params = hci_conn_params_lookup(hdev, addr, addr_type);
3550 if (!params)
3551 return;
3552
cef952ce
AG
3553 hci_pend_le_conn_del(hdev, addr, addr_type);
3554
15819a70
AG
3555 list_del(&params->list);
3556 kfree(params);
3557
3558 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3559}
3560
3561/* This function requires the caller holds hdev->lock */
3562void hci_conn_params_clear(struct hci_dev *hdev)
3563{
3564 struct hci_conn_params *params, *tmp;
3565
3566 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3567 list_del(&params->list);
3568 kfree(params);
3569 }
3570
1089b67d
MH
3571 hci_pend_le_conns_clear(hdev);
3572
15819a70
AG
3573 BT_DBG("All LE connection parameters were removed");
3574}
3575
4c87eaab 3576static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3577{
4c87eaab
AG
3578 if (status) {
3579 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3580
4c87eaab
AG
3581 hci_dev_lock(hdev);
3582 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3583 hci_dev_unlock(hdev);
3584 return;
3585 }
7ba8b4be
AG
3586}
3587
4c87eaab 3588static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3589{
4c87eaab
AG
3590 /* General inquiry access code (GIAC) */
3591 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3592 struct hci_request req;
3593 struct hci_cp_inquiry cp;
7ba8b4be
AG
3594 int err;
3595
4c87eaab
AG
3596 if (status) {
3597 BT_ERR("Failed to disable LE scanning: status %d", status);
3598 return;
3599 }
7ba8b4be 3600
4c87eaab
AG
3601 switch (hdev->discovery.type) {
3602 case DISCOV_TYPE_LE:
3603 hci_dev_lock(hdev);
3604 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3605 hci_dev_unlock(hdev);
3606 break;
7ba8b4be 3607
4c87eaab
AG
3608 case DISCOV_TYPE_INTERLEAVED:
3609 hci_req_init(&req, hdev);
7ba8b4be 3610
4c87eaab
AG
3611 memset(&cp, 0, sizeof(cp));
3612 memcpy(&cp.lap, lap, sizeof(cp.lap));
3613 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3614 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3615
4c87eaab 3616 hci_dev_lock(hdev);
7dbfac1d 3617
4c87eaab 3618 hci_inquiry_cache_flush(hdev);
7dbfac1d 3619
4c87eaab
AG
3620 err = hci_req_run(&req, inquiry_complete);
3621 if (err) {
3622 BT_ERR("Inquiry request failed: err %d", err);
3623 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3624 }
7dbfac1d 3625
4c87eaab
AG
3626 hci_dev_unlock(hdev);
3627 break;
7dbfac1d 3628 }
7dbfac1d
AG
3629}
3630
7ba8b4be
AG
3631static void le_scan_disable_work(struct work_struct *work)
3632{
3633 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3634 le_scan_disable.work);
4c87eaab
AG
3635 struct hci_request req;
3636 int err;
7ba8b4be
AG
3637
3638 BT_DBG("%s", hdev->name);
3639
4c87eaab 3640 hci_req_init(&req, hdev);
28b75a89 3641
b1efcc28 3642 hci_req_add_le_scan_disable(&req);
28b75a89 3643
4c87eaab
AG
3644 err = hci_req_run(&req, le_scan_disable_work_complete);
3645 if (err)
3646 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3647}
3648
8d97250e
JH
3649static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3650{
3651 struct hci_dev *hdev = req->hdev;
3652
3653 /* If we're advertising or initiating an LE connection we can't
3654 * go ahead and change the random address at this time. This is
3655 * because the eventual initiator address used for the
3656 * subsequently created connection will be undefined (some
3657 * controllers use the new address and others the one we had
3658 * when the operation started).
3659 *
3660 * In this kind of scenario skip the update and let the random
3661 * address be updated at the next cycle.
3662 */
3663 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3664 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3665 BT_DBG("Deferring random address update");
3666 return;
3667 }
3668
3669 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3670}
3671
94b1fc92
MH
3672int hci_update_random_address(struct hci_request *req, bool require_privacy,
3673 u8 *own_addr_type)
ebd3a747
JH
3674{
3675 struct hci_dev *hdev = req->hdev;
3676 int err;
3677
3678 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
 3679 * current RPA has expired or there is something other than
3680 * the current RPA in use, then generate a new one.
ebd3a747
JH
3681 */
3682 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3683 int to;
3684
3685 *own_addr_type = ADDR_LE_DEV_RANDOM;
3686
3687 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3688 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3689 return 0;
3690
2b5224dc 3691 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3692 if (err < 0) {
3693 BT_ERR("%s failed to generate new RPA", hdev->name);
3694 return err;
3695 }
3696
8d97250e 3697 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3698
3699 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3700 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3701
3702 return 0;
94b1fc92
MH
3703 }
3704
 3705 /* In case of required privacy without a resolvable private address,
3706 * use an unresolvable private address. This is useful for active
3707 * scanning and non-connectable advertising.
3708 */
3709 if (require_privacy) {
3710 bdaddr_t urpa;
3711
3712 get_random_bytes(&urpa, 6);
3713 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3714
3715 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3716 set_random_addr(req, &urpa);
94b1fc92 3717 return 0;
ebd3a747
JH
3718 }
3719
 3720 /* If forcing the static address is in use or there is no public
 3721 * address, use the static address as the random address (but skip
 3722 * the HCI command if the current random address is already the
 3723 * static one).
 3724 */
111902f7 3725 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3726 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3727 *own_addr_type = ADDR_LE_DEV_RANDOM;
3728 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3729 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3730 &hdev->static_addr);
3731 return 0;
3732 }
3733
3734 /* Neither privacy nor static address is being used so use a
3735 * public address.
3736 */
3737 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3738
3739 return 0;
3740}
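/* Example (illustrative sketch, not from the original source): the usual
 * calling pattern for hci_update_random_address() when building an HCI
 * request. The helper selects the own-address type (RPA, unresolvable,
 * static or public) and queues any needed address update; the caller then
 * uses the returned type in the following command. Function name is
 * hypothetical.
 */
static int example_prepare_le_scan(struct hci_request *req)
{
	u8 own_addr_type;
	int err;

	/* require_privacy = true: never fall back to the public address */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		return err;

	BT_DBG("using own_addr_type %u", own_addr_type);

	/* ... add HCI_OP_LE_SET_SCAN_PARAM using own_addr_type ... */

	return 0;
}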
3741
a1f4c318
JH
3742/* Copy the Identity Address of the controller.
3743 *
3744 * If the controller has a public BD_ADDR, then by default use that one.
3745 * If this is a LE only controller without a public address, default to
3746 * the static random address.
3747 *
3748 * For debugging purposes it is possible to force controllers with a
3749 * public address to use the static random address instead.
3750 */
3751void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3752 u8 *bdaddr_type)
3753{
111902f7 3754 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3755 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3756 bacpy(bdaddr, &hdev->static_addr);
3757 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3758 } else {
3759 bacpy(bdaddr, &hdev->bdaddr);
3760 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3761 }
3762}
3763
9be0dab7
DH
3764/* Alloc HCI device */
3765struct hci_dev *hci_alloc_dev(void)
3766{
3767 struct hci_dev *hdev;
3768
3769 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3770 if (!hdev)
3771 return NULL;
3772
b1b813d4
DH
3773 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3774 hdev->esco_type = (ESCO_HV1);
3775 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3776 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3777 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3778 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3779 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3780
b1b813d4
DH
3781 hdev->sniff_max_interval = 800;
3782 hdev->sniff_min_interval = 80;
3783
3f959d46 3784 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3785 hdev->le_scan_interval = 0x0060;
3786 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3787 hdev->le_conn_min_interval = 0x0028;
3788 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3789 hdev->le_conn_latency = 0x0000;
3790 hdev->le_supv_timeout = 0x002a;
bef64738 3791
d6bfd59c 3792 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3793 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3794 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3795 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3796
b1b813d4
DH
3797 mutex_init(&hdev->lock);
3798 mutex_init(&hdev->req_lock);
3799
3800 INIT_LIST_HEAD(&hdev->mgmt_pending);
3801 INIT_LIST_HEAD(&hdev->blacklist);
3802 INIT_LIST_HEAD(&hdev->uuids);
3803 INIT_LIST_HEAD(&hdev->link_keys);
3804 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3805 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3806 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3807 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3808 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3809 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3810 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3811
3812 INIT_WORK(&hdev->rx_work, hci_rx_work);
3813 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3814 INIT_WORK(&hdev->tx_work, hci_tx_work);
3815 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3816
b1b813d4
DH
3817 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3818 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3819 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3820
b1b813d4
DH
3821 skb_queue_head_init(&hdev->rx_q);
3822 skb_queue_head_init(&hdev->cmd_q);
3823 skb_queue_head_init(&hdev->raw_q);
3824
3825 init_waitqueue_head(&hdev->req_wait_q);
3826
65cc2b49 3827 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3828
b1b813d4
DH
3829 hci_init_sysfs(hdev);
3830 discovery_init(hdev);
9be0dab7
DH
3831
3832 return hdev;
3833}
3834EXPORT_SYMBOL(hci_alloc_dev);
3835
3836/* Free HCI device */
3837void hci_free_dev(struct hci_dev *hdev)
3838{
9be0dab7
DH
3839 /* will be freed via the device release function */
3840 put_device(&hdev->dev);
3841}
3842EXPORT_SYMBOL(hci_free_dev);
3843
1da177e4
LT
3844/* Register HCI device */
3845int hci_register_dev(struct hci_dev *hdev)
3846{
b1b813d4 3847 int id, error;
1da177e4 3848
010666a1 3849 if (!hdev->open || !hdev->close)
1da177e4
LT
3850 return -EINVAL;
3851
08add513
MM
3852 /* Do not allow HCI_AMP devices to register at index 0,
3853 * so the index can be used as the AMP controller ID.
3854 */
3df92b31
SL
3855 switch (hdev->dev_type) {
3856 case HCI_BREDR:
3857 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3858 break;
3859 case HCI_AMP:
3860 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3861 break;
3862 default:
3863 return -EINVAL;
1da177e4 3864 }
8e87d142 3865
3df92b31
SL
3866 if (id < 0)
3867 return id;
3868
1da177e4
LT
3869 sprintf(hdev->name, "hci%d", id);
3870 hdev->id = id;
2d8b3a11
AE
3871
3872 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3873
d8537548
KC
3874 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3875 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3876 if (!hdev->workqueue) {
3877 error = -ENOMEM;
3878 goto err;
3879 }
f48fd9c8 3880
d8537548
KC
3881 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3882 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3883 if (!hdev->req_workqueue) {
3884 destroy_workqueue(hdev->workqueue);
3885 error = -ENOMEM;
3886 goto err;
3887 }
3888
0153e2ec
MH
3889 if (!IS_ERR_OR_NULL(bt_debugfs))
3890 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3891
bdc3e0f1
MH
3892 dev_set_name(&hdev->dev, "%s", hdev->name);
3893
99780a7b
JH
3894 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3895 CRYPTO_ALG_ASYNC);
3896 if (IS_ERR(hdev->tfm_aes)) {
3897 BT_ERR("Unable to create crypto context");
3898 error = PTR_ERR(hdev->tfm_aes);
3899 hdev->tfm_aes = NULL;
3900 goto err_wqueue;
3901 }
3902
bdc3e0f1 3903 error = device_add(&hdev->dev);
33ca954d 3904 if (error < 0)
99780a7b 3905 goto err_tfm;
1da177e4 3906
611b30f7 3907 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3908 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3909 hdev);
611b30f7
MH
3910 if (hdev->rfkill) {
3911 if (rfkill_register(hdev->rfkill) < 0) {
3912 rfkill_destroy(hdev->rfkill);
3913 hdev->rfkill = NULL;
3914 }
3915 }
3916
5e130367
JH
3917 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3918 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3919
a8b2d5c2 3920 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3921 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3922
01cd3404 3923 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3924 /* Assume BR/EDR support until proven otherwise (such as
3925 * through reading supported features during init).
3926 */
3927 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3928 }
ce2be9ac 3929
fcee3377
GP
3930 write_lock(&hci_dev_list_lock);
3931 list_add(&hdev->list, &hci_dev_list);
3932 write_unlock(&hci_dev_list_lock);
3933
fee746b0
MH
3934 /* Devices that are marked for raw-only usage need to set
3935 * the HCI_RAW flag to indicate that only the user channel is
3936 * supported.
3937 */
3938 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3939 set_bit(HCI_RAW, &hdev->flags);
3940
1da177e4 3941 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3942 hci_dev_hold(hdev);
1da177e4 3943
19202573 3944 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3945
1da177e4 3946 return id;
f48fd9c8 3947
99780a7b
JH
3948err_tfm:
3949 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3950err_wqueue:
3951 destroy_workqueue(hdev->workqueue);
6ead1bbc 3952 destroy_workqueue(hdev->req_workqueue);
33ca954d 3953err:
3df92b31 3954 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3955
33ca954d 3956 return error;
1da177e4
LT
3957}
3958EXPORT_SYMBOL(hci_register_dev);
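
Taken together, hci_alloc_dev() and hci_register_dev() give transport drivers their usual probe pattern. The following is a condensed, hypothetical sketch (my_open, my_close, my_send and my_probe are placeholder names; btusb and friends are the real examples):

	static int my_open(struct hci_dev *hdev)
	{
		return 0;
	}

	static int my_close(struct hci_dev *hdev)
	{
		return 0;
	}

	static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
	{
		/* A real driver would hand the skb to its transport here */
		kfree_skb(skb);
		return 0;
	}

	static int my_probe(void *transport)
	{
		struct hci_dev *hdev;
		int err;

		hdev = hci_alloc_dev();
		if (!hdev)
			return -ENOMEM;

		/* hci_register_dev() rejects devices without open/close */
		hdev->bus = HCI_USB;
		hdev->open = my_open;
		hdev->close = my_close;
		hdev->send = my_send;
		hci_set_drvdata(hdev, transport);

		err = hci_register_dev(hdev);
		if (err < 0) {
			hci_free_dev(hdev);
			return err;
		}

		return 0;
	}
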
3959
3960/* Unregister HCI device */
59735631 3961void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3962{
3df92b31 3963 int i, id;
ef222013 3964
c13854ce 3965 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3966
94324962
JH
3967 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3968
3df92b31
SL
3969 id = hdev->id;
3970
f20d09d5 3971 write_lock(&hci_dev_list_lock);
1da177e4 3972 list_del(&hdev->list);
f20d09d5 3973 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3974
3975 hci_dev_do_close(hdev);
3976
cd4c5391 3977 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3978 kfree_skb(hdev->reassembly[i]);
3979
b9b5ef18
GP
3980 cancel_work_sync(&hdev->power_on);
3981
ab81cbf9 3982 if (!test_bit(HCI_INIT, &hdev->flags) &&
fee746b0
MH
3983 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3984 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
09fd0de5 3985 hci_dev_lock(hdev);
744cf19e 3986 mgmt_index_removed(hdev);
09fd0de5 3987 hci_dev_unlock(hdev);
56e5cb86 3988 }
ab81cbf9 3989
2e58ef3e
JH
3990 /* mgmt_index_removed should take care of emptying the
3991 * pending list */
3992 BUG_ON(!list_empty(&hdev->mgmt_pending));
3993
1da177e4
LT
3994 hci_notify(hdev, HCI_DEV_UNREG);
3995
611b30f7
MH
3996 if (hdev->rfkill) {
3997 rfkill_unregister(hdev->rfkill);
3998 rfkill_destroy(hdev->rfkill);
3999 }
4000
99780a7b
JH
4001 if (hdev->tfm_aes)
4002 crypto_free_blkcipher(hdev->tfm_aes);
4003
bdc3e0f1 4004 device_del(&hdev->dev);
147e2d59 4005
0153e2ec
MH
4006 debugfs_remove_recursive(hdev->debugfs);
4007
f48fd9c8 4008 destroy_workqueue(hdev->workqueue);
6ead1bbc 4009 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4010
09fd0de5 4011 hci_dev_lock(hdev);
e2e0cacb 4012 hci_blacklist_clear(hdev);
2aeb9a1a 4013 hci_uuids_clear(hdev);
55ed8ca1 4014 hci_link_keys_clear(hdev);
b899efaf 4015 hci_smp_ltks_clear(hdev);
970c4e46 4016 hci_smp_irks_clear(hdev);
2763eda6 4017 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4018 hci_white_list_clear(hdev);
15819a70 4019 hci_conn_params_clear(hdev);
09fd0de5 4020 hci_dev_unlock(hdev);
e2e0cacb 4021
dc946bd8 4022 hci_dev_put(hdev);
3df92b31
SL
4023
4024 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4025}
4026EXPORT_SYMBOL(hci_unregister_dev);
4027
4028/* Suspend HCI device */
4029int hci_suspend_dev(struct hci_dev *hdev)
4030{
4031 hci_notify(hdev, HCI_DEV_SUSPEND);
4032 return 0;
4033}
4034EXPORT_SYMBOL(hci_suspend_dev);
4035
4036/* Resume HCI device */
4037int hci_resume_dev(struct hci_dev *hdev)
4038{
4039 hci_notify(hdev, HCI_DEV_RESUME);
4040 return 0;
4041}
4042EXPORT_SYMBOL(hci_resume_dev);
4043
76bca880 4044/* Receive frame from HCI drivers */
e1a26170 4045int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4046{
76bca880 4047 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4048 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4049 kfree_skb(skb);
4050 return -ENXIO;
4051 }
4052
d82603c6 4053 /* Incoming skb */
76bca880
MH
4054 bt_cb(skb)->incoming = 1;
4055
4056 /* Time stamp */
4057 __net_timestamp(skb);
4058
76bca880 4059 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4060 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4061
76bca880
MH
4062 return 0;
4063}
4064EXPORT_SYMBOL(hci_recv_frame);
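
A driver whose transport delivers complete packets only needs to tag the skb before handing it over. A minimal sketch, assuming a hypothetical my_rx_event helper:

	static int my_rx_event(struct hci_dev *hdev, const void *buf, int len)
	{
		struct sk_buff *skb;

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		memcpy(skb_put(skb, len), buf, len);
		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

		/* hci_recv_frame() always takes ownership of the skb */
		return hci_recv_frame(hdev, skb);
	}
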
4065
33e882a5 4066static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4067 int count, __u8 index)
33e882a5
SS
4068{
4069 int len = 0;
4070 int hlen = 0;
4071 int remain = count;
4072 struct sk_buff *skb;
4073 struct bt_skb_cb *scb;
4074
4075 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4076 index >= NUM_REASSEMBLY)
33e882a5
SS
4077 return -EILSEQ;
4078
4079 skb = hdev->reassembly[index];
4080
4081 if (!skb) {
4082 switch (type) {
4083 case HCI_ACLDATA_PKT:
4084 len = HCI_MAX_FRAME_SIZE;
4085 hlen = HCI_ACL_HDR_SIZE;
4086 break;
4087 case HCI_EVENT_PKT:
4088 len = HCI_MAX_EVENT_SIZE;
4089 hlen = HCI_EVENT_HDR_SIZE;
4090 break;
4091 case HCI_SCODATA_PKT:
4092 len = HCI_MAX_SCO_SIZE;
4093 hlen = HCI_SCO_HDR_SIZE;
4094 break;
4095 }
4096
1e429f38 4097 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4098 if (!skb)
4099 return -ENOMEM;
4100
4101 scb = (void *) skb->cb;
4102 scb->expect = hlen;
4103 scb->pkt_type = type;
4104
33e882a5
SS
4105 hdev->reassembly[index] = skb;
4106 }
4107
4108 while (count) {
4109 scb = (void *) skb->cb;
89bb46d0 4110 len = min_t(uint, scb->expect, count);
33e882a5
SS
4111
4112 memcpy(skb_put(skb, len), data, len);
4113
4114 count -= len;
4115 data += len;
4116 scb->expect -= len;
4117 remain = count;
4118
4119 switch (type) {
4120 case HCI_EVENT_PKT:
4121 if (skb->len == HCI_EVENT_HDR_SIZE) {
4122 struct hci_event_hdr *h = hci_event_hdr(skb);
4123 scb->expect = h->plen;
4124
4125 if (skb_tailroom(skb) < scb->expect) {
4126 kfree_skb(skb);
4127 hdev->reassembly[index] = NULL;
4128 return -ENOMEM;
4129 }
4130 }
4131 break;
4132
4133 case HCI_ACLDATA_PKT:
4134 if (skb->len == HCI_ACL_HDR_SIZE) {
4135 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4136 scb->expect = __le16_to_cpu(h->dlen);
4137
4138 if (skb_tailroom(skb) < scb->expect) {
4139 kfree_skb(skb);
4140 hdev->reassembly[index] = NULL;
4141 return -ENOMEM;
4142 }
4143 }
4144 break;
4145
4146 case HCI_SCODATA_PKT:
4147 if (skb->len == HCI_SCO_HDR_SIZE) {
4148 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4149 scb->expect = h->dlen;
4150
4151 if (skb_tailroom(skb) < scb->expect) {
4152 kfree_skb(skb);
4153 hdev->reassembly[index] = NULL;
4154 return -ENOMEM;
4155 }
4156 }
4157 break;
4158 }
4159
4160 if (scb->expect == 0) {
4161 /* Complete frame */
4162
4163 bt_cb(skb)->pkt_type = type;
e1a26170 4164 hci_recv_frame(hdev, skb);
33e882a5
SS
4165
4166 hdev->reassembly[index] = NULL;
4167 return remain;
4168 }
4169 }
4170
4171 return remain;
4172}
4173
ef222013
MH
4174int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4175{
f39a3c06
SS
4176 int rem = 0;
4177
ef222013
MH
4178 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4179 return -EILSEQ;
4180
da5f6c37 4181 while (count) {
1e429f38 4182 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4183 if (rem < 0)
4184 return rem;
ef222013 4185
f39a3c06
SS
4186 data += (count - rem);
4187 count = rem;
f81c6224 4188 }
ef222013 4189
f39a3c06 4190 return rem;
ef222013
MH
4191}
4192EXPORT_SYMBOL(hci_recv_fragment);
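
hci_recv_fragment() suits transports that know the packet type out of band but may split payloads across reads; a hedged sketch (my_rx_acl_chunk is a placeholder name):

	/* Push one partial ACL chunk into the core's reassembly buffers;
	 * returns 0 once all bytes are consumed, or a negative error.
	 */
	static int my_rx_acl_chunk(struct hci_dev *hdev, void *buf, int len)
	{
		return hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
	}
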
4193
99811510
SS
4194#define STREAM_REASSEMBLY 0
4195
4196int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4197{
4198 int type;
4199 int rem = 0;
4200
da5f6c37 4201 while (count) {
99811510
SS
4202 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4203
4204 if (!skb) {
4205 struct { char type; } *pkt;
4206
4207 /* Start of the frame */
4208 pkt = data;
4209 type = pkt->type;
4210
4211 data++;
4212 count--;
4213 } else
4214 type = bt_cb(skb)->pkt_type;
4215
1e429f38 4216 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4217 STREAM_REASSEMBLY);
99811510
SS
4218 if (rem < 0)
4219 return rem;
4220
4221 data += (count - rem);
4222 count = rem;
f81c6224 4223 }
99811510
SS
4224
4225 return rem;
4226}
4227EXPORT_SYMBOL(hci_recv_stream_fragment);
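
For pure byte-stream transports such as UARTs, the driver can push raw bytes straight into hci_recv_stream_fragment(), which consumes the leading H:4 packet-type byte itself. A minimal hypothetical sketch:

	static void my_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
	{
		int err;

		/* Framing and reassembly are handled entirely by the core */
		err = hci_recv_stream_fragment(hdev, (void *)buf, len);
		if (err < 0)
			BT_ERR("%s stream reassembly failed (%d)",
			       hdev->name, err);
	}
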
4228
1da177e4
LT
4229/* ---- Interface to upper protocols ---- */
4230
1da177e4
LT
4231int hci_register_cb(struct hci_cb *cb)
4232{
4233 BT_DBG("%p name %s", cb, cb->name);
4234
f20d09d5 4235 write_lock(&hci_cb_list_lock);
1da177e4 4236 list_add(&cb->list, &hci_cb_list);
f20d09d5 4237 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4238
4239 return 0;
4240}
4241EXPORT_SYMBOL(hci_register_cb);
4242
4243int hci_unregister_cb(struct hci_cb *cb)
4244{
4245 BT_DBG("%p name %s", cb, cb->name);
4246
f20d09d5 4247 write_lock(&hci_cb_list_lock);
1da177e4 4248 list_del(&cb->list);
f20d09d5 4249 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4250
4251 return 0;
4252}
4253EXPORT_SYMBOL(hci_unregister_cb);
4254
51086991 4255static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4256{
0d48d939 4257 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4258
cd82e61c
MH
4259 /* Time stamp */
4260 __net_timestamp(skb);
1da177e4 4261
cd82e61c
MH
4262 /* Send copy to monitor */
4263 hci_send_to_monitor(hdev, skb);
4264
4265 if (atomic_read(&hdev->promisc)) {
4266 /* Send copy to the sockets */
470fe1b5 4267 hci_send_to_sock(hdev, skb);
1da177e4
LT
4268 }
4269
4270 /* Get rid of skb owner, prior to sending to the driver. */
4271 skb_orphan(skb);
4272
7bd8f09f 4273 if (hdev->send(hdev, skb) < 0)
51086991 4274 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4275}
4276
3119ae95
JH
4277void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4278{
4279 skb_queue_head_init(&req->cmd_q);
4280 req->hdev = hdev;
5d73e034 4281 req->err = 0;
3119ae95
JH
4282}
4283
4284int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4285{
4286 struct hci_dev *hdev = req->hdev;
4287 struct sk_buff *skb;
4288 unsigned long flags;
4289
4290 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4291
5d73e034
AG
4292 /* If an error occurred during request building, remove all HCI
4293 * commands queued on the HCI request queue.
4294 */
4295 if (req->err) {
4296 skb_queue_purge(&req->cmd_q);
4297 return req->err;
4298 }
4299
3119ae95
JH
4300 /* Do not allow empty requests */
4301 if (skb_queue_empty(&req->cmd_q))
382b0c39 4302 return -ENODATA;
3119ae95
JH
4303
4304 skb = skb_peek_tail(&req->cmd_q);
4305 bt_cb(skb)->req.complete = complete;
4306
4307 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4308 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4309 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4310
4311 queue_work(hdev->workqueue, &hdev->cmd_work);
4312
4313 return 0;
4314}
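
The request machinery is used by initializing a request, queueing one or more commands, and running it with a completion callback. A condensed sketch that queues the same command hci_req_add_le_scan_disable() (later in this file) wraps; stop_le_scan and scan_disable_complete are hypothetical names:

	static void scan_disable_complete(struct hci_dev *hdev, u8 status)
	{
		BT_DBG("%s status 0x%2.2x", hdev->name, status);
	}

	static int stop_le_scan(struct hci_dev *hdev)
	{
		struct hci_cp_le_set_scan_enable cp;
		struct hci_request req;

		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

		/* Splices the queued commands onto hdev->cmd_q and kicks
		 * cmd_work; scan_disable_complete runs once the last
		 * command completes.
		 */
		return hci_req_run(&req, scan_disable_complete);
	}
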
4315
1ca3a9d0 4316static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4317 u32 plen, const void *param)
1da177e4
LT
4318{
4319 int len = HCI_COMMAND_HDR_SIZE + plen;
4320 struct hci_command_hdr *hdr;
4321 struct sk_buff *skb;
4322
1da177e4 4323 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4324 if (!skb)
4325 return NULL;
1da177e4
LT
4326
4327 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4328 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4329 hdr->plen = plen;
4330
4331 if (plen)
4332 memcpy(skb_put(skb, plen), param, plen);
4333
4334 BT_DBG("skb len %d", skb->len);
4335
0d48d939 4336 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4337
1ca3a9d0
JH
4338 return skb;
4339}
4340
4341/* Send HCI command */
07dc93dd
JH
4342int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4343 const void *param)
1ca3a9d0
JH
4344{
4345 struct sk_buff *skb;
4346
4347 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4348
4349 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4350 if (!skb) {
4351 BT_ERR("%s no memory for command", hdev->name);
4352 return -ENOMEM;
4353 }
4354
11714b3d
JH
4355 /* Stand-alone HCI commands must be flagged as
4356 * single-command requests.
4357 */
4358 bt_cb(skb)->req.start = true;
4359
1da177e4 4360 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4361 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4362
4363 return 0;
4364}
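
For a one-off command outside any request, hci_send_cmd() is sufficient; for instance, a sketch issuing the parameter-less Read Buffer Size command (query_buffer_sizes is a hypothetical name; the reply arrives asynchronously via the event path):

	static int query_buffer_sizes(struct hci_dev *hdev)
	{
		/* Read Buffer Size takes no parameters; the result arrives
		 * later as a Command Complete event.
		 */
		return hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
	}
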
1da177e4 4365
71c76a17 4366/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4367void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4368 const void *param, u8 event)
71c76a17
JH
4369{
4370 struct hci_dev *hdev = req->hdev;
4371 struct sk_buff *skb;
4372
4373 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4374
34739c1e
AG
4375 /* If an error occurred during request building, there is no point in
4376 * queueing the HCI command. We can simply return.
4377 */
4378 if (req->err)
4379 return;
4380
71c76a17
JH
4381 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4382 if (!skb) {
5d73e034
AG
4383 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4384 hdev->name, opcode);
4385 req->err = -ENOMEM;
e348fe6b 4386 return;
71c76a17
JH
4387 }
4388
4389 if (skb_queue_empty(&req->cmd_q))
4390 bt_cb(skb)->req.start = true;
4391
02350a72
JH
4392 bt_cb(skb)->req.event = event;
4393
71c76a17 4394 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4395}
4396
07dc93dd
JH
4397void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4398 const void *param)
02350a72
JH
4399{
4400 hci_req_add_ev(req, opcode, plen, param, 0);
4401}
4402
1da177e4 4403/* Get data from the previously sent command */
a9de9248 4404void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4405{
4406 struct hci_command_hdr *hdr;
4407
4408 if (!hdev->sent_cmd)
4409 return NULL;
4410
4411 hdr = (void *) hdev->sent_cmd->data;
4412
a9de9248 4413 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4414 return NULL;
4415
f0e09510 4416 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4417
4418 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4419}
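
Command-complete handlers use hci_sent_cmd_data() to recover the parameters of the command an event answers. A hedged sketch modeled on the handlers in hci_event.c (the function name is hypothetical):

	static void my_cc_le_set_scan_enable(struct hci_dev *hdev,
					     struct sk_buff *skb)
	{
		struct hci_cp_le_set_scan_enable *cp;
		__u8 status = *((__u8 *) skb->data);

		/* Returns NULL if the last sent command has another opcode */
		cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
		if (!cp)
			return;

		BT_DBG("%s status 0x%2.2x enable %u", hdev->name, status,
		       cp->enable);
	}
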
4420
4421/* Send ACL data */
4422static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4423{
4424 struct hci_acl_hdr *hdr;
4425 int len = skb->len;
4426
badff6d0
ACM
4427 skb_push(skb, HCI_ACL_HDR_SIZE);
4428 skb_reset_transport_header(skb);
9c70220b 4429 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4430 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4431 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4432}
4433
ee22be7e 4434static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4435 struct sk_buff *skb, __u16 flags)
1da177e4 4436{
ee22be7e 4437 struct hci_conn *conn = chan->conn;
1da177e4
LT
4438 struct hci_dev *hdev = conn->hdev;
4439 struct sk_buff *list;
4440
087bfd99
GP
4441 skb->len = skb_headlen(skb);
4442 skb->data_len = 0;
4443
4444 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4445
4446 switch (hdev->dev_type) {
4447 case HCI_BREDR:
4448 hci_add_acl_hdr(skb, conn->handle, flags);
4449 break;
4450 case HCI_AMP:
4451 hci_add_acl_hdr(skb, chan->handle, flags);
4452 break;
4453 default:
4454 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4455 return;
4456 }
087bfd99 4457
70f23020
AE
4458 list = skb_shinfo(skb)->frag_list;
4459 if (!list) {
1da177e4
LT
4460 /* Non-fragmented */
4461 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4462
73d80deb 4463 skb_queue_tail(queue, skb);
1da177e4
LT
4464 } else {
4465 /* Fragmented */
4466 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4467
4468 skb_shinfo(skb)->frag_list = NULL;
4469
4470 /* Queue all fragments atomically */
af3e6359 4471 spin_lock(&queue->lock);
1da177e4 4472
73d80deb 4473 __skb_queue_tail(queue, skb);
e702112f
AE
4474
4475 flags &= ~ACL_START;
4476 flags |= ACL_CONT;
1da177e4
LT
4477 do {
4478 skb = list; list = list->next;
8e87d142 4479
0d48d939 4480 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4481 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4482
4483 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4484
73d80deb 4485 __skb_queue_tail(queue, skb);
1da177e4
LT
4486 } while (list);
4487
af3e6359 4488 spin_unlock(&queue->lock);
1da177e4 4489 }
73d80deb
LAD
4490}
4491
4492void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4493{
ee22be7e 4494 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4495
f0e09510 4496 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4497
ee22be7e 4498 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4499
3eff45ea 4500 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4501}
1da177e4
LT
4502
4503/* Send SCO data */
0d861d8b 4504void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4505{
4506 struct hci_dev *hdev = conn->hdev;
4507 struct hci_sco_hdr hdr;
4508
4509 BT_DBG("%s len %d", hdev->name, skb->len);
4510
aca3192c 4511 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4512 hdr.dlen = skb->len;
4513
badff6d0
ACM
4514 skb_push(skb, HCI_SCO_HDR_SIZE);
4515 skb_reset_transport_header(skb);
9c70220b 4516 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4517
0d48d939 4518 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4519
1da177e4 4520 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4521 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4522}
1da177e4
LT
4523
4524/* ---- HCI TX task (outgoing data) ---- */
4525
4526/* HCI Connection scheduler */
6039aa73
GP
4527static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4528 int *quote)
1da177e4
LT
4529{
4530 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4531 struct hci_conn *conn = NULL, *c;
abc5de8f 4532 unsigned int num = 0, min = ~0;
1da177e4 4533
8e87d142 4534 /* We don't have to lock the device here. Connections are always
1da177e4 4535 * added and removed with the TX task disabled. */
bf4c6325
GP
4536
4537 rcu_read_lock();
4538
4539 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4540 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4541 continue;
769be974
MH
4542
4543 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4544 continue;
4545
1da177e4
LT
4546 num++;
4547
4548 if (c->sent < min) {
4549 min = c->sent;
4550 conn = c;
4551 }
52087a79
LAD
4552
4553 if (hci_conn_num(hdev, type) == num)
4554 break;
1da177e4
LT
4555 }
4556
bf4c6325
GP
4557 rcu_read_unlock();
4558
1da177e4 4559 if (conn) {
6ed58ec5
VT
4560 int cnt, q;
4561
4562 switch (conn->type) {
4563 case ACL_LINK:
4564 cnt = hdev->acl_cnt;
4565 break;
4566 case SCO_LINK:
4567 case ESCO_LINK:
4568 cnt = hdev->sco_cnt;
4569 break;
4570 case LE_LINK:
4571 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4572 break;
4573 default:
4574 cnt = 0;
4575 BT_ERR("Unknown link type");
4576 }
4577
4578 q = cnt / num;
1da177e4
LT
4579 *quote = q ? q : 1;
4580 } else
4581 *quote = 0;
4582
4583 BT_DBG("conn %p quote %d", conn, *quote);
4584 return conn;
4585}
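
As a worked example of the quota above: eight free ACL buffers shared by three busy ACL connections give each connection quote = 8 / 3 = 2 packets per scheduling pass, and the `q ? q : 1` fallback still grants one packet when connections outnumber free buffers, so no connection starves.
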
4586
6039aa73 4587static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4588{
4589 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4590 struct hci_conn *c;
1da177e4 4591
bae1f5d9 4592 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4593
bf4c6325
GP
4594 rcu_read_lock();
4595
1da177e4 4596 /* Kill stalled connections */
bf4c6325 4597 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4598 if (c->type == type && c->sent) {
6ed93dc6
AE
4599 BT_ERR("%s killing stalled connection %pMR",
4600 hdev->name, &c->dst);
bed71748 4601 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4602 }
4603 }
bf4c6325
GP
4604
4605 rcu_read_unlock();
1da177e4
LT
4606}
4607
6039aa73
GP
4608static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4609 int *quote)
1da177e4 4610{
73d80deb
LAD
4611 struct hci_conn_hash *h = &hdev->conn_hash;
4612 struct hci_chan *chan = NULL;
abc5de8f 4613 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4614 struct hci_conn *conn;
73d80deb
LAD
4615 int cnt, q, conn_num = 0;
4616
4617 BT_DBG("%s", hdev->name);
4618
bf4c6325
GP
4619 rcu_read_lock();
4620
4621 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4622 struct hci_chan *tmp;
4623
4624 if (conn->type != type)
4625 continue;
4626
4627 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4628 continue;
4629
4630 conn_num++;
4631
8192edef 4632 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4633 struct sk_buff *skb;
4634
4635 if (skb_queue_empty(&tmp->data_q))
4636 continue;
4637
4638 skb = skb_peek(&tmp->data_q);
4639 if (skb->priority < cur_prio)
4640 continue;
4641
4642 if (skb->priority > cur_prio) {
4643 num = 0;
4644 min = ~0;
4645 cur_prio = skb->priority;
4646 }
4647
4648 num++;
4649
4650 if (conn->sent < min) {
4651 min = conn->sent;
4652 chan = tmp;
4653 }
4654 }
4655
4656 if (hci_conn_num(hdev, type) == conn_num)
4657 break;
4658 }
4659
bf4c6325
GP
4660 rcu_read_unlock();
4661
73d80deb
LAD
4662 if (!chan)
4663 return NULL;
4664
4665 switch (chan->conn->type) {
4666 case ACL_LINK:
4667 cnt = hdev->acl_cnt;
4668 break;
bd1eb66b
AE
4669 case AMP_LINK:
4670 cnt = hdev->block_cnt;
4671 break;
73d80deb
LAD
4672 case SCO_LINK:
4673 case ESCO_LINK:
4674 cnt = hdev->sco_cnt;
4675 break;
4676 case LE_LINK:
4677 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4678 break;
4679 default:
4680 cnt = 0;
4681 BT_ERR("Unknown link type");
4682 }
4683
4684 q = cnt / num;
4685 *quote = q ? q : 1;
4686 BT_DBG("chan %p quote %d", chan, *quote);
4687 return chan;
4688}
4689
02b20f0b
LAD
4690static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4691{
4692 struct hci_conn_hash *h = &hdev->conn_hash;
4693 struct hci_conn *conn;
4694 int num = 0;
4695
4696 BT_DBG("%s", hdev->name);
4697
bf4c6325
GP
4698 rcu_read_lock();
4699
4700 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4701 struct hci_chan *chan;
4702
4703 if (conn->type != type)
4704 continue;
4705
4706 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4707 continue;
4708
4709 num++;
4710
8192edef 4711 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4712 struct sk_buff *skb;
4713
4714 if (chan->sent) {
4715 chan->sent = 0;
4716 continue;
4717 }
4718
4719 if (skb_queue_empty(&chan->data_q))
4720 continue;
4721
4722 skb = skb_peek(&chan->data_q);
4723 if (skb->priority >= HCI_PRIO_MAX - 1)
4724 continue;
4725
4726 skb->priority = HCI_PRIO_MAX - 1;
4727
4728 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4729 skb->priority);
02b20f0b
LAD
4730 }
4731
4732 if (hci_conn_num(hdev, type) == num)
4733 break;
4734 }
bf4c6325
GP
4735
4736 rcu_read_unlock();
4737
02b20f0b
LAD
4738}
4739
b71d385a
AE
4740static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4741{
4742 /* Calculate count of blocks used by this packet */
4743 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4744}
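
For example, with a block length of 64 bytes, a 340-byte ACL packet (4-byte header plus 336 bytes of payload) occupies DIV_ROUND_UP(336, 64) = 6 controller blocks.
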
4745
6039aa73 4746static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4747{
fee746b0 4748 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
1da177e4
LT
4749 /* ACL tx timeout must be longer than the maximum
4750 * link supervision timeout (40.9 seconds) */
63d2bc1b 4751 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4752 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4753 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4754 }
63d2bc1b 4755}
1da177e4 4756
6039aa73 4757static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4758{
4759 unsigned int cnt = hdev->acl_cnt;
4760 struct hci_chan *chan;
4761 struct sk_buff *skb;
4762 int quote;
4763
4764 __check_timeout(hdev, cnt);
04837f64 4765
73d80deb 4766 while (hdev->acl_cnt &&
a8c5fb1a 4767 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4768 u32 priority = (skb_peek(&chan->data_q))->priority;
4769 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4770 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4771 skb->len, skb->priority);
73d80deb 4772
ec1cce24
LAD
4773 /* Stop if priority has changed */
4774 if (skb->priority < priority)
4775 break;
4776
4777 skb = skb_dequeue(&chan->data_q);
4778
73d80deb 4779 hci_conn_enter_active_mode(chan->conn,
04124681 4780 bt_cb(skb)->force_active);
04837f64 4781
57d17d70 4782 hci_send_frame(hdev, skb);
1da177e4
LT
4783 hdev->acl_last_tx = jiffies;
4784
4785 hdev->acl_cnt--;
73d80deb
LAD
4786 chan->sent++;
4787 chan->conn->sent++;
1da177e4
LT
4788 }
4789 }
02b20f0b
LAD
4790
4791 if (cnt != hdev->acl_cnt)
4792 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4793}
4794
6039aa73 4795static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4796{
63d2bc1b 4797 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4798 struct hci_chan *chan;
4799 struct sk_buff *skb;
4800 int quote;
bd1eb66b 4801 u8 type;
b71d385a 4802
63d2bc1b 4803 __check_timeout(hdev, cnt);
b71d385a 4804
bd1eb66b
AE
4805 BT_DBG("%s", hdev->name);
4806
4807 if (hdev->dev_type == HCI_AMP)
4808 type = AMP_LINK;
4809 else
4810 type = ACL_LINK;
4811
b71d385a 4812 while (hdev->block_cnt > 0 &&
bd1eb66b 4813 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4814 u32 priority = (skb_peek(&chan->data_q))->priority;
4815 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4816 int blocks;
4817
4818 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4819 skb->len, skb->priority);
b71d385a
AE
4820
4821 /* Stop if priority has changed */
4822 if (skb->priority < priority)
4823 break;
4824
4825 skb = skb_dequeue(&chan->data_q);
4826
4827 blocks = __get_blocks(hdev, skb);
4828 if (blocks > hdev->block_cnt)
4829 return;
4830
4831 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4832 bt_cb(skb)->force_active);
b71d385a 4833
57d17d70 4834 hci_send_frame(hdev, skb);
b71d385a
AE
4835 hdev->acl_last_tx = jiffies;
4836
4837 hdev->block_cnt -= blocks;
4838 quote -= blocks;
4839
4840 chan->sent += blocks;
4841 chan->conn->sent += blocks;
4842 }
4843 }
4844
4845 if (cnt != hdev->block_cnt)
bd1eb66b 4846 hci_prio_recalculate(hdev, type);
b71d385a
AE
4847}
4848
6039aa73 4849static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4850{
4851 BT_DBG("%s", hdev->name);
4852
bd1eb66b
AE
4853 /* No ACL link over BR/EDR controller */
4854 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4855 return;
4856
4857 /* No AMP link over AMP controller */
4858 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4859 return;
4860
4861 switch (hdev->flow_ctl_mode) {
4862 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4863 hci_sched_acl_pkt(hdev);
4864 break;
4865
4866 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4867 hci_sched_acl_blk(hdev);
4868 break;
4869 }
4870}
4871
1da177e4 4872/* Schedule SCO */
6039aa73 4873static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4874{
4875 struct hci_conn *conn;
4876 struct sk_buff *skb;
4877 int quote;
4878
4879 BT_DBG("%s", hdev->name);
4880
52087a79
LAD
4881 if (!hci_conn_num(hdev, SCO_LINK))
4882 return;
4883
1da177e4
LT
4884 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4885 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4886 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4887 hci_send_frame(hdev, skb);
1da177e4
LT
4888
4889 conn->sent++;
4890 if (conn->sent == ~0)
4891 conn->sent = 0;
4892 }
4893 }
4894}
4895
6039aa73 4896static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4897{
4898 struct hci_conn *conn;
4899 struct sk_buff *skb;
4900 int quote;
4901
4902 BT_DBG("%s", hdev->name);
4903
52087a79
LAD
4904 if (!hci_conn_num(hdev, ESCO_LINK))
4905 return;
4906
8fc9ced3
GP
4907 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4908 &quote))) {
b6a0dc82
MH
4909 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4910 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4911 hci_send_frame(hdev, skb);
b6a0dc82
MH
4912
4913 conn->sent++;
4914 if (conn->sent == ~0)
4915 conn->sent = 0;
4916 }
4917 }
4918}
4919
6039aa73 4920static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4921{
73d80deb 4922 struct hci_chan *chan;
6ed58ec5 4923 struct sk_buff *skb;
02b20f0b 4924 int quote, cnt, tmp;
6ed58ec5
VT
4925
4926 BT_DBG("%s", hdev->name);
4927
52087a79
LAD
4928 if (!hci_conn_num(hdev, LE_LINK))
4929 return;
4930
fee746b0 4931 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
6ed58ec5
VT
4932 /* LE tx timeout must be longer than the maximum
4933 * link supervision timeout (40.9 seconds) */
bae1f5d9 4934 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4935 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4936 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4937 }
4938
4939 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4940 tmp = cnt;
73d80deb 4941 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4942 u32 priority = (skb_peek(&chan->data_q))->priority;
4943 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4944 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4945 skb->len, skb->priority);
6ed58ec5 4946
ec1cce24
LAD
4947 /* Stop if priority has changed */
4948 if (skb->priority < priority)
4949 break;
4950
4951 skb = skb_dequeue(&chan->data_q);
4952
57d17d70 4953 hci_send_frame(hdev, skb);
6ed58ec5
VT
4954 hdev->le_last_tx = jiffies;
4955
4956 cnt--;
73d80deb
LAD
4957 chan->sent++;
4958 chan->conn->sent++;
6ed58ec5
VT
4959 }
4960 }
73d80deb 4961
6ed58ec5
VT
4962 if (hdev->le_pkts)
4963 hdev->le_cnt = cnt;
4964 else
4965 hdev->acl_cnt = cnt;
02b20f0b
LAD
4966
4967 if (cnt != tmp)
4968 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4969}
4970
3eff45ea 4971static void hci_tx_work(struct work_struct *work)
1da177e4 4972{
3eff45ea 4973 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4974 struct sk_buff *skb;
4975
6ed58ec5 4976 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4977 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4978
52de599e
MH
4979 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4980 /* Schedule queues and send stuff to HCI driver */
4981 hci_sched_acl(hdev);
4982 hci_sched_sco(hdev);
4983 hci_sched_esco(hdev);
4984 hci_sched_le(hdev);
4985 }
6ed58ec5 4986
1da177e4
LT
4987 /* Send next queued raw (unknown type) packet */
4988 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4989 hci_send_frame(hdev, skb);
1da177e4
LT
4990}
4991
25985edc 4992/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4993
4994/* ACL data packet */
6039aa73 4995static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4996{
4997 struct hci_acl_hdr *hdr = (void *) skb->data;
4998 struct hci_conn *conn;
4999 __u16 handle, flags;
5000
5001 skb_pull(skb, HCI_ACL_HDR_SIZE);
5002
5003 handle = __le16_to_cpu(hdr->handle);
5004 flags = hci_flags(handle);
5005 handle = hci_handle(handle);
5006
f0e09510 5007 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5008 handle, flags);
1da177e4
LT
5009
5010 hdev->stat.acl_rx++;
5011
5012 hci_dev_lock(hdev);
5013 conn = hci_conn_hash_lookup_handle(hdev, handle);
5014 hci_dev_unlock(hdev);
8e87d142 5015
1da177e4 5016 if (conn) {
65983fc7 5017 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5018
1da177e4 5019 /* Send to upper protocol */
686ebf28
UF
5020 l2cap_recv_acldata(conn, skb, flags);
5021 return;
1da177e4 5022 } else {
8e87d142 5023 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5024 hdev->name, handle);
1da177e4
LT
5025 }
5026
5027 kfree_skb(skb);
5028}
5029
5030/* SCO data packet */
6039aa73 5031static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5032{
5033 struct hci_sco_hdr *hdr = (void *) skb->data;
5034 struct hci_conn *conn;
5035 __u16 handle;
5036
5037 skb_pull(skb, HCI_SCO_HDR_SIZE);
5038
5039 handle = __le16_to_cpu(hdr->handle);
5040
f0e09510 5041 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5042
5043 hdev->stat.sco_rx++;
5044
5045 hci_dev_lock(hdev);
5046 conn = hci_conn_hash_lookup_handle(hdev, handle);
5047 hci_dev_unlock(hdev);
5048
5049 if (conn) {
1da177e4 5050 /* Send to upper protocol */
686ebf28
UF
5051 sco_recv_scodata(conn, skb);
5052 return;
1da177e4 5053 } else {
8e87d142 5054 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5055 hdev->name, handle);
1da177e4
LT
5056 }
5057
5058 kfree_skb(skb);
5059}
5060
9238f36a
JH
5061static bool hci_req_is_complete(struct hci_dev *hdev)
5062{
5063 struct sk_buff *skb;
5064
5065 skb = skb_peek(&hdev->cmd_q);
5066 if (!skb)
5067 return true;
5068
5069 return bt_cb(skb)->req.start;
5070}
5071
42c6b129
JH
5072static void hci_resend_last(struct hci_dev *hdev)
5073{
5074 struct hci_command_hdr *sent;
5075 struct sk_buff *skb;
5076 u16 opcode;
5077
5078 if (!hdev->sent_cmd)
5079 return;
5080
5081 sent = (void *) hdev->sent_cmd->data;
5082 opcode = __le16_to_cpu(sent->opcode);
5083 if (opcode == HCI_OP_RESET)
5084 return;
5085
5086 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5087 if (!skb)
5088 return;
5089
5090 skb_queue_head(&hdev->cmd_q, skb);
5091 queue_work(hdev->workqueue, &hdev->cmd_work);
5092}
5093
9238f36a
JH
5094void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5095{
5096 hci_req_complete_t req_complete = NULL;
5097 struct sk_buff *skb;
5098 unsigned long flags;
5099
5100 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5101
42c6b129
JH
5102 /* If the completed command doesn't match the last one that was
5103 * sent we need to do special handling of it.
9238f36a 5104 */
42c6b129
JH
5105 if (!hci_sent_cmd_data(hdev, opcode)) {
5106 /* Some CSR based controllers generate a spontaneous
5107 * reset complete event during init and any pending
5108 * command will never be completed. In such a case we
5109 * need to resend whatever was the last sent
5110 * command.
5111 */
5112 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5113 hci_resend_last(hdev);
5114
9238f36a 5115 return;
42c6b129 5116 }
9238f36a
JH
5117
5118 /* If the command succeeded and there's still more commands in
5119 * this request the request is not yet complete.
5120 */
5121 if (!status && !hci_req_is_complete(hdev))
5122 return;
5123
5124 /* If this was the last command in a request the complete
5125 * callback would be found in hdev->sent_cmd instead of the
5126 * command queue (hdev->cmd_q).
5127 */
5128 if (hdev->sent_cmd) {
5129 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5130
5131 if (req_complete) {
5132 /* We must set the complete callback to NULL to
5133 * avoid calling the callback more than once if
5134 * this function gets called again.
5135 */
5136 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5137
9238f36a 5138 goto call_complete;
53e21fbc 5139 }
9238f36a
JH
5140 }
5141
5142 /* Remove all pending commands belonging to this request */
5143 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5144 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5145 if (bt_cb(skb)->req.start) {
5146 __skb_queue_head(&hdev->cmd_q, skb);
5147 break;
5148 }
5149
5150 req_complete = bt_cb(skb)->req.complete;
5151 kfree_skb(skb);
5152 }
5153 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5154
5155call_complete:
5156 if (req_complete)
5157 req_complete(hdev, status);
5158}
5159
b78752cc 5160static void hci_rx_work(struct work_struct *work)
1da177e4 5161{
b78752cc 5162 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5163 struct sk_buff *skb;
5164
5165 BT_DBG("%s", hdev->name);
5166
1da177e4 5167 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5168 /* Send copy to monitor */
5169 hci_send_to_monitor(hdev, skb);
5170
1da177e4
LT
5171 if (atomic_read(&hdev->promisc)) {
5172 /* Send copy to the sockets */
470fe1b5 5173 hci_send_to_sock(hdev, skb);
1da177e4
LT
5174 }
5175
fee746b0 5176 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5177 kfree_skb(skb);
5178 continue;
5179 }
5180
5181 if (test_bit(HCI_INIT, &hdev->flags)) {
5182 /* Don't process data packets in this state. */
0d48d939 5183 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5184 case HCI_ACLDATA_PKT:
5185 case HCI_SCODATA_PKT:
5186 kfree_skb(skb);
5187 continue;
3ff50b79 5188 }
1da177e4
LT
5189 }
5190
5191 /* Process frame */
0d48d939 5192 switch (bt_cb(skb)->pkt_type) {
1da177e4 5193 case HCI_EVENT_PKT:
b78752cc 5194 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5195 hci_event_packet(hdev, skb);
5196 break;
5197
5198 case HCI_ACLDATA_PKT:
5199 BT_DBG("%s ACL data packet", hdev->name);
5200 hci_acldata_packet(hdev, skb);
5201 break;
5202
5203 case HCI_SCODATA_PKT:
5204 BT_DBG("%s SCO data packet", hdev->name);
5205 hci_scodata_packet(hdev, skb);
5206 break;
5207
5208 default:
5209 kfree_skb(skb);
5210 break;
5211 }
5212 }
1da177e4
LT
5213}
5214
c347b765 5215static void hci_cmd_work(struct work_struct *work)
1da177e4 5216{
c347b765 5217 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5218 struct sk_buff *skb;
5219
2104786b
AE
5220 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5221 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5222
1da177e4 5223 /* Send queued commands */
5a08ecce
AE
5224 if (atomic_read(&hdev->cmd_cnt)) {
5225 skb = skb_dequeue(&hdev->cmd_q);
5226 if (!skb)
5227 return;
5228
7585b97a 5229 kfree_skb(hdev->sent_cmd);
1da177e4 5230
a675d7f1 5231 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5232 if (hdev->sent_cmd) {
1da177e4 5233 atomic_dec(&hdev->cmd_cnt);
57d17d70 5234 hci_send_frame(hdev, skb);
7bdb8a5c 5235 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5236 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5237 else
65cc2b49
MH
5238 schedule_delayed_work(&hdev->cmd_timer,
5239 HCI_CMD_TIMEOUT);
1da177e4
LT
5240 } else {
5241 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5242 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5243 }
5244 }
5245}
b1efcc28
AG
5246
5247void hci_req_add_le_scan_disable(struct hci_request *req)
5248{
5249 struct hci_cp_le_set_scan_enable cp;
5250
5251 memset(&cp, 0, sizeof(cp));
5252 cp.enable = LE_SCAN_DISABLE;
5253 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5254}
a4790dbd 5255
8ef30fd3
AG
5256void hci_req_add_le_passive_scan(struct hci_request *req)
5257{
5258 struct hci_cp_le_set_scan_param param_cp;
5259 struct hci_cp_le_set_scan_enable enable_cp;
5260 struct hci_dev *hdev = req->hdev;
5261 u8 own_addr_type;
5262
6ab535a7
MH
5263 /* Set require_privacy to false since no SCAN_REQ are send
5264 * during passive scanning. Not using an unresolvable address
5265 * here is important so that peer devices using direct
5266 * advertising with our address will be correctly reported
5267 * by the controller.
8ef30fd3 5268 */
6ab535a7 5269 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5270 return;
5271
5272 memset(&param_cp, 0, sizeof(param_cp));
5273 param_cp.type = LE_SCAN_PASSIVE;
5274 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5275 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5276 param_cp.own_address_type = own_addr_type;
5277 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5278 &param_cp);
5279
5280 memset(&enable_cp, 0, sizeof(enable_cp));
5281 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5282 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5283 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5284 &enable_cp);
5285}
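
With the defaults assigned in hci_alloc_dev(), this programs passive scanning with a 60 ms interval and a 30 ms window: le_scan_interval 0x0060 is 96 units of 0.625 ms and le_scan_window 0x0030 is 48 units, so the controller listens half of the time.
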
5286
a4790dbd
AG
5287static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5288{
5289 if (status)
5290 BT_DBG("HCI request failed to update background scanning: "
5291 "status 0x%2.2x", status);
5292}
5293
5294/* This function controls the background scanning based on hdev->pend_le_conns
5295 * list. If there are pending LE connections, we start background scanning;
5296 * otherwise we stop it.
5297 *
5298 * This function requires that the caller holds hdev->lock.
5299 */
5300void hci_update_background_scan(struct hci_dev *hdev)
5301{
a4790dbd
AG
5302 struct hci_request req;
5303 struct hci_conn *conn;
5304 int err;
5305
c20c02d5
MH
5306 if (!test_bit(HCI_UP, &hdev->flags) ||
5307 test_bit(HCI_INIT, &hdev->flags) ||
5308 test_bit(HCI_SETUP, &hdev->dev_flags) ||
b8221770 5309 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5310 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5311 return;
5312
a4790dbd
AG
5313 hci_req_init(&req, hdev);
5314
5315 if (list_empty(&hdev->pend_le_conns)) {
5316 /* If there are no pending LE connections, we should stop
5317 * the background scanning.
5318 */
5319
5320 /* If controller is not scanning we are done. */
5321 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5322 return;
5323
5324 hci_req_add_le_scan_disable(&req);
5325
5326 BT_DBG("%s stopping background scanning", hdev->name);
5327 } else {
a4790dbd
AG
5328 /* If there is at least one pending LE connection, we should
5329 * keep the background scan running.
5330 */
5331
a4790dbd
AG
5332 /* If controller is connecting, we should not start scanning
5333 * since some controllers are not able to scan and connect at
5334 * the same time.
5335 */
5336 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5337 if (conn)
5338 return;
5339
4340a124
AG
5340 /* If controller is currently scanning, we stop it to ensure we
5341 * don't miss any advertising (due to duplicates filter).
5342 */
5343 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5344 hci_req_add_le_scan_disable(&req);
5345
8ef30fd3 5346 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5347
5348 BT_DBG("%s starting background scanning", hdev->name);
5349 }
5350
5351 err = hci_req_run(&req, update_background_scan_complete);
5352 if (err)
5353 BT_ERR("Failed to run HCI request: err %d", err);
5354}