/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

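/* Toggling this entry sends HCI_OP_ENABLE_DUT_MODE to enter Device
 * Under Test mode, or HCI_OP_RESET to leave it, since a controller
 * reset is the only way back out of DUT mode.
 */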
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
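
/* Usage sketch (assuming debugfs is mounted at its default location and
 * the controller is up):
 *
 *     echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The exact path depends on where hdev->debugfs was created.
 */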

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

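/* Fetch the event skb stashed in hdev->recv_evt and check that it is
 * the event we are waiting for (or, when event is 0, a Command Complete
 * for the given opcode). The skb is freed on any mismatch.
 */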
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

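/* Send a single HCI command and block, interruptibly, until the matching
 * event arrives or the timeout expires. Returns the event skb on success
 * or an ERR_PTR on failure.
 */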
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

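/* A minimal usage sketch, mirroring dut_mode_write() above: callers take
 * hci_req_lock(), issue e.g.
 *
 *     skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *
 * and, when the result is not an ERR_PTR, own the returned skb (which
 * carries the command's return parameters) and must kfree_skb() it.
 */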
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

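/* Pick the inquiry result mode: 0x02 for extended inquiry results, 0x01
 * for inquiry results with RSSI, 0x00 for standard mode. The explicit
 * manufacturer/revision checks below appear to cover controllers that
 * support RSSI results without advertising the feature bit.
 */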
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

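/* Build the page 1 event mask from the controller's feature bits so that
 * only events the hardware can actually generate are unmasked.
 */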
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

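/* Second init stage: transport-specific setup (BR/EDR and/or LE), the
 * common event mask, SSP/EIR configuration and extended feature reads.
 */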
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page
                 * should be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

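/* Advertise every link policy the controller supports (role switch,
 * hold, sniff, park) as the default policy for new connections.
 */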
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

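/* Third init stage: stored link key cleanup, default link policy, the
 * LE event mask and host support bit, and feature pages beyond page 1.
 */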
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

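/* Run the staged init requests in order. Stage 1 applies to every
 * controller type; AMP controllers stop there, while BR/EDR/LE
 * controllers continue through stages 2-4. The debugfs entries are
 * created only once, during the initial HCI_SETUP phase.
 */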
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
                            &conn_info_min_age_fops);
        debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
                            &conn_info_max_age_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_file("identity", 0400, hdev->debugfs,
                                    hdev, &identity_fops);
                debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
                                    hdev, &rpa_timeout_fops);
                debugfs_create_file("random_address", 0444, hdev->debugfs,
                                    hdev, &random_address_fops);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

                /* For controllers with a public address, provide a debug
                 * option to force the usage of the configured static
                 * address. By default the public address is used.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        debugfs_create_file("force_static_address", 0644,
                                            hdev->debugfs, hdev,
                                            &force_static_address_fops);

                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
                                    &white_list_fops);
                debugfs_create_file("identity_resolving_keys", 0400,
                                    hdev->debugfs, hdev,
                                    &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("conn_latency", 0644, hdev->debugfs,
                                    hdev, &conn_latency_fops);
                debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
                                    hdev, &supervision_timeout_fops);
                debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
                                    hdev, &adv_channel_map_fops);
                debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                                    &device_list_fops);
                debugfs_create_u16("discov_interleaved_timeout", 0644,
                                   hdev->debugfs,
                                   &hdev->discov_interleaved_timeout);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

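/* Entering DISCOVERY_STOPPED re-evaluates background scanning and, unless
 * the previous state was DISCOVERY_STARTING, reports the end of discovery
 * to mgmt; DISCOVERY_FINDING reports that discovery has started.
 */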
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

1f9b9a5d 1900void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1901{
30883512 1902 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1903 struct inquiry_entry *p, *n;
1da177e4 1904
561aafbc
JH
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
b57c1a56 1907 kfree(p);
1da177e4 1908 }
561aafbc
JH
1909
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1912}
1913
a8c5fb1a
GP
1914struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1da177e4 1916{
30883512 1917 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1918 struct inquiry_entry *e;
1919
6ed93dc6 1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1921
561aafbc
JH
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1925 }
1926
1927 return NULL;
1928}
1929
1930struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1931 bdaddr_t *bdaddr)
561aafbc 1932{
30883512 1933 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1934 struct inquiry_entry *e;
1935
6ed93dc6 1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1937
1938 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1939 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1940 return e;
1941 }
1942
1943 return NULL;
1da177e4
LT
1944}
1945
30dc78e1 1946struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1947 bdaddr_t *bdaddr,
1948 int state)
30dc78e1
JH
1949{
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1952
6ed93dc6 1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1954
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963}
1964
a3d4e20a 1965void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1966 struct inquiry_entry *ie)
a3d4e20a
JH
1967{
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1971
1972 list_del(&ie->list);
1973
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1976 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1977 break;
1978 pos = &p->list;
1979 }
1980
1981 list_add(&ie->list, pos);
1982}
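
An illustrative note on the ordering this function maintains:

/* Illustration: the resolve list stays sorted by ascending abs(RSSI),
 * so nearer devices (e.g. -40 dBm) get their names resolved before
 * more distant ones (e.g. -75 dBm). Re-inserting an entry whose RSSI
 * changed to -55 places it between -40 and -60, while entries already
 * being resolved (NAME_PENDING) keep their place at the front.
 */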
1983
af58925c
MH
1984u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1da177e4 1986{
30883512 1987 struct discovery_state *cache = &hdev->discovery;
70f23020 1988 struct inquiry_entry *ie;
af58925c 1989 u32 flags = 0;
1da177e4 1990
6ed93dc6 1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1992
2b2fec4d
SJ
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
af58925c
MH
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 1997
70f23020 1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1999 if (ie) {
af58925c
MH
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2002
a3d4e20a 2003 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2004 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2007 }
2008
561aafbc 2009 goto update;
a3d4e20a 2010 }
561aafbc
JH
2011
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2017 }
561aafbc
JH
2018
2019 list_add(&ie->all, &cache->all);
2020
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2026 }
70f23020 2027
561aafbc
JH
2028update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2030 ie->name_state != NAME_PENDING) {
561aafbc
JH
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
1da177e4
LT
2033 }
2034
70f23020
AE
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
1da177e4 2037 cache->timestamp = jiffies;
3175405b
JH
2038
2039 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2041
af58925c
MH
2042done:
2043 return flags;
1da177e4
LT
2044}
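
A hedged sketch of how a caller might consume the returned flags; example_handle_found is hypothetical, while both flag bits are the mgmt constants used above.

static void example_handle_found(struct hci_dev *hdev,
				 struct inquiry_data *data, bool name_known)
{
	u32 flags = hci_inquiry_cache_update(hdev, data, name_known);

	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		BT_DBG("%pMR still needs name resolution", &data->bdaddr);

	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
		BT_DBG("%pMR lacks SSP, expect legacy pairing", &data->bdaddr);
}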
2045
2046static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047{
30883512 2048 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2052
561aafbc 2053 list_for_each_entry(e, &cache->all, all) {
1da177e4 2054 struct inquiry_data *data = &e->data;
b57c1a56
JH
2055
2056 if (copied >= num)
2057 break;
2058
1da177e4
LT
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
b57c1a56 2065
1da177e4 2066 info++;
b57c1a56 2067 copied++;
1da177e4
LT
2068 }
2069
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2072}
2073
42c6b129 2074static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2075{
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2077 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2078 struct hci_cp_inquiry cp;
2079
2080 BT_DBG("%s", hdev->name);
2081
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2084
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
42c6b129 2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2090}
2091
3e13fa1e
AG
2092static int wait_inquiry(void *word)
2093{
2094 schedule();
2095 return signal_pending(current);
2096}
2097
1da177e4
LT
2098int hci_inquiry(void __user *arg)
2099{
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2106
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2109
5a08ecce
AE
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
1da177e4
LT
2112 return -ENODEV;
2113
0736cfa8
MH
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2117 }
2118
fee746b0
MH
2119 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
5b69bef5
MH
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2127 }
2128
56f87901
JH
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2132 }
2133
09fd0de5 2134 hci_dev_lock(hdev);
8e87d142 2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2137 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2138 do_inquiry = 1;
2139 }
09fd0de5 2140 hci_dev_unlock(hdev);
1da177e4 2141
04837f64 2142 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2143
2144 if (do_inquiry) {
01178cd4
JH
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
70f23020
AE
2147 if (err < 0)
2148 goto done;
3e13fa1e
AG
2149
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2152 */
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
70f23020 2156 }
1da177e4 2157
8fc9ced3
GP
 2158 /* For an unlimited number of responses (ir.num_rsp == 0), use a
 2159 * buffer with 255 entries.
 2160 */
1da177e4
LT
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
 2163 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2164 * and then copy it to user space.
 2165 */
01df8c31 2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2167 if (!buf) {
1da177e4
LT
2168 err = -ENOMEM;
2169 goto done;
2170 }
2171
09fd0de5 2172 hci_dev_lock(hdev);
1da177e4 2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2174 hci_dev_unlock(hdev);
1da177e4
LT
2175
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2181 ir.num_rsp))
1da177e4 2182 err = -EFAULT;
8e87d142 2183 } else
1da177e4
LT
2184 err = -EFAULT;
2185
2186 kfree(buf);
2187
2188done:
2189 hci_dev_put(hdev);
2190 return err;
2191}
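
A hedged user-space sketch of the HCIINQUIRY ioctl that hci_inquiry() serves; it assumes the BlueZ headers <bluetooth/bluetooth.h> and <bluetooth/hci.h> and a raw HCI socket, with error handling kept minimal. example_inquiry is a hypothetical name.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_inquiry(int hci_sk, int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} *buf;
	int err;

	buf = calloc(1, sizeof(*buf));
	if (!buf)
		return -1;

	buf->ir.dev_id  = dev_id;
	buf->ir.flags   = IREQ_CACHE_FLUSH;	/* drop stale cache entries */
	buf->ir.length  = 8;			/* 8 * 1.28s of inquiry */
	buf->ir.num_rsp = 255;			/* 0 would also mean "unlimited" */
	buf->ir.lap[0]  = 0x33;			/* GIAC 0x9e8b33, LSB first */
	buf->ir.lap[1]  = 0x8b;
	buf->ir.lap[2]  = 0x9e;

	err = ioctl(hci_sk, HCIINQUIRY, buf);	/* results follow the header */
	free(buf);
	return err;
}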
2192
cbed0ca1 2193static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2194{
1da177e4
LT
2195 int ret = 0;
2196
1da177e4
LT
2197 BT_DBG("%s %p", hdev->name, hdev);
2198
2199 hci_req_lock(hdev);
2200
94324962
JH
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2204 }
2205
a5c8f270
MH
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2209 */
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2213 }
2214
2215 /* Check for valid public address or a configured static
 2216 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2219 *
c6beca0e
MH
 2220 * For user channel usage, it does not matter
 2221 * whether a public address or static random address is
 2222 * available.
2223 *
a5c8f270
MH
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2226 */
c6beca0e
MH
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2233 }
611b30f7
MH
2234 }
2235
1da177e4
LT
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2239 }
2240
1da177e4
LT
2241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2244 }
2245
f41c70c4
MH
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2248
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2250 ret = hdev->setup(hdev);
2251
24c457e2
MH
2252 /* If public address change is configured, ensure that the
2253 * address gets programmed. If the driver does not support
2254 * changing the public address, fail the power on procedure.
2255 */
2256 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2257 if (hdev->set_bdaddr)
2258 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2259 else
2260 ret = -EADDRNOTAVAIL;
2261 }
2262
f41c70c4 2263 if (!ret) {
fee746b0 2264 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
0736cfa8 2265 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2266 ret = __hci_init(hdev);
1da177e4
LT
2267 }
2268
f41c70c4
MH
2269 clear_bit(HCI_INIT, &hdev->flags);
2270
1da177e4
LT
2271 if (!ret) {
2272 hci_dev_hold(hdev);
d6bfd59c 2273 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2274 set_bit(HCI_UP, &hdev->flags);
2275 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2276 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 2277 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2278 hdev->dev_type == HCI_BREDR) {
09fd0de5 2279 hci_dev_lock(hdev);
744cf19e 2280 mgmt_powered(hdev, 1);
09fd0de5 2281 hci_dev_unlock(hdev);
56e5cb86 2282 }
8e87d142 2283 } else {
1da177e4 2284 /* Init failed, cleanup */
3eff45ea 2285 flush_work(&hdev->tx_work);
c347b765 2286 flush_work(&hdev->cmd_work);
b78752cc 2287 flush_work(&hdev->rx_work);
1da177e4
LT
2288
2289 skb_queue_purge(&hdev->cmd_q);
2290 skb_queue_purge(&hdev->rx_q);
2291
2292 if (hdev->flush)
2293 hdev->flush(hdev);
2294
2295 if (hdev->sent_cmd) {
2296 kfree_skb(hdev->sent_cmd);
2297 hdev->sent_cmd = NULL;
2298 }
2299
2300 hdev->close(hdev);
fee746b0 2301 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2302 }
2303
2304done:
2305 hci_req_unlock(hdev);
1da177e4
LT
2306 return ret;
2307}
2308
cbed0ca1
JH
2309/* ---- HCI ioctl helpers ---- */
2310
2311int hci_dev_open(__u16 dev)
2312{
2313 struct hci_dev *hdev;
2314 int err;
2315
2316 hdev = hci_dev_get(dev);
2317 if (!hdev)
2318 return -ENODEV;
2319
fee746b0
MH
2320 /* Devices that are marked for raw-only usage can only be powered
2321 * up as user channel. Trying to bring them up as normal devices
 2322 * will result in a failure. Only user channel operation is
2323 * possible.
2324 *
2325 * When this function is called for a user channel, the flag
2326 * HCI_USER_CHANNEL will be set first before attempting to
2327 * open the device.
2328 */
2329 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2330 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2331 err = -EOPNOTSUPP;
2332 goto done;
2333 }
2334
e1d08f40
JH
2335 /* We need to ensure that no other power on/off work is pending
2336 * before proceeding to call hci_dev_do_open. This is
2337 * particularly important if the setup procedure has not yet
2338 * completed.
2339 */
2340 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2341 cancel_delayed_work(&hdev->power_off);
2342
a5c8f270
MH
2343 /* After this call it is guaranteed that the setup procedure
2344 * has finished. This means that error conditions like RFKILL
 2345 * or the lack of a valid public or static random address now apply.
2346 */
e1d08f40
JH
2347 flush_workqueue(hdev->req_workqueue);
2348
cbed0ca1
JH
2349 err = hci_dev_do_open(hdev);
2350
fee746b0 2351done:
cbed0ca1 2352 hci_dev_put(hdev);
cbed0ca1
JH
2353 return err;
2354}
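
A hedged user-space sketch of the matching HCIDEVUP ioctl (requires CAP_NET_ADMIN; headers and constants are the standard BlueZ ones; example_bring_up is hypothetical):

#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_bring_up(int dev_id)
{
	int ctl, err = 0;

	ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (ctl < 0)
		return -1;

	/* EALREADY just means the device was already up */
	if (ioctl(ctl, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
		err = -1;

	close(ctl);
	return err;
}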
2355
1da177e4
LT
2356static int hci_dev_do_close(struct hci_dev *hdev)
2357{
2358 BT_DBG("%s %p", hdev->name, hdev);
2359
78c04c0b
VCG
2360 cancel_delayed_work(&hdev->power_off);
2361
1da177e4
LT
2362 hci_req_cancel(hdev, ENODEV);
2363 hci_req_lock(hdev);
2364
2365 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2366 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2367 hci_req_unlock(hdev);
2368 return 0;
2369 }
2370
3eff45ea
GP
 2371 /* Flush the RX and TX work items */
2372 flush_work(&hdev->tx_work);
b78752cc 2373 flush_work(&hdev->rx_work);
1da177e4 2374
16ab91ab 2375 if (hdev->discov_timeout > 0) {
e0f9309f 2376 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2377 hdev->discov_timeout = 0;
5e5282bb 2378 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2379 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2380 }
2381
a8b2d5c2 2382 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2383 cancel_delayed_work(&hdev->service_cache);
2384
7ba8b4be 2385 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2386
2387 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2388 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2389
09fd0de5 2390 hci_dev_lock(hdev);
1f9b9a5d 2391 hci_inquiry_cache_flush(hdev);
1da177e4 2392 hci_conn_hash_flush(hdev);
6046dc3e 2393 hci_pend_le_conns_clear(hdev);
09fd0de5 2394 hci_dev_unlock(hdev);
1da177e4
LT
2395
2396 hci_notify(hdev, HCI_DEV_DOWN);
2397
2398 if (hdev->flush)
2399 hdev->flush(hdev);
2400
2401 /* Reset device */
2402 skb_queue_purge(&hdev->cmd_q);
2403 atomic_set(&hdev->cmd_cnt, 1);
fee746b0 2404 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
3a6afbd2 2405 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2406 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2407 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2408 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2409 clear_bit(HCI_INIT, &hdev->flags);
2410 }
2411
c347b765
GP
2412 /* flush cmd work */
2413 flush_work(&hdev->cmd_work);
1da177e4
LT
2414
2415 /* Drop queues */
2416 skb_queue_purge(&hdev->rx_q);
2417 skb_queue_purge(&hdev->cmd_q);
2418 skb_queue_purge(&hdev->raw_q);
2419
2420 /* Drop last sent command */
2421 if (hdev->sent_cmd) {
65cc2b49 2422 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2423 kfree_skb(hdev->sent_cmd);
2424 hdev->sent_cmd = NULL;
2425 }
2426
b6ddb638
JH
2427 kfree_skb(hdev->recv_evt);
2428 hdev->recv_evt = NULL;
2429
1da177e4
LT
2430 /* After this point our queues are empty
2431 * and no tasks are scheduled. */
2432 hdev->close(hdev);
2433
35b973c9 2434 /* Clear flags */
fee746b0 2435 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2436 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2437
93c311a0
MH
2438 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2439 if (hdev->dev_type == HCI_BREDR) {
2440 hci_dev_lock(hdev);
2441 mgmt_powered(hdev, 0);
2442 hci_dev_unlock(hdev);
2443 }
8ee56540 2444 }
5add6af8 2445
ced5c338 2446 /* Controller radio is available but is currently powered down */
536619e8 2447 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2448
e59fda8d 2449 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2450 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2451 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2452
1da177e4
LT
2453 hci_req_unlock(hdev);
2454
2455 hci_dev_put(hdev);
2456 return 0;
2457}
2458
2459int hci_dev_close(__u16 dev)
2460{
2461 struct hci_dev *hdev;
2462 int err;
2463
70f23020
AE
2464 hdev = hci_dev_get(dev);
2465 if (!hdev)
1da177e4 2466 return -ENODEV;
8ee56540 2467
0736cfa8
MH
2468 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2469 err = -EBUSY;
2470 goto done;
2471 }
2472
8ee56540
MH
2473 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2474 cancel_delayed_work(&hdev->power_off);
2475
1da177e4 2476 err = hci_dev_do_close(hdev);
8ee56540 2477
0736cfa8 2478done:
1da177e4
LT
2479 hci_dev_put(hdev);
2480 return err;
2481}
2482
2483int hci_dev_reset(__u16 dev)
2484{
2485 struct hci_dev *hdev;
2486 int ret = 0;
2487
70f23020
AE
2488 hdev = hci_dev_get(dev);
2489 if (!hdev)
1da177e4
LT
2490 return -ENODEV;
2491
2492 hci_req_lock(hdev);
1da177e4 2493
808a049e
MH
2494 if (!test_bit(HCI_UP, &hdev->flags)) {
2495 ret = -ENETDOWN;
1da177e4 2496 goto done;
808a049e 2497 }
1da177e4 2498
0736cfa8
MH
2499 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2500 ret = -EBUSY;
2501 goto done;
2502 }
2503
fee746b0
MH
2504 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2505 ret = -EOPNOTSUPP;
2506 goto done;
2507 }
2508
1da177e4
LT
2509 /* Drop queues */
2510 skb_queue_purge(&hdev->rx_q);
2511 skb_queue_purge(&hdev->cmd_q);
2512
09fd0de5 2513 hci_dev_lock(hdev);
1f9b9a5d 2514 hci_inquiry_cache_flush(hdev);
1da177e4 2515 hci_conn_hash_flush(hdev);
09fd0de5 2516 hci_dev_unlock(hdev);
1da177e4
LT
2517
2518 if (hdev->flush)
2519 hdev->flush(hdev);
2520
8e87d142 2521 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2522 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2523
fee746b0 2524 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2525
2526done:
1da177e4
LT
2527 hci_req_unlock(hdev);
2528 hci_dev_put(hdev);
2529 return ret;
2530}
2531
2532int hci_dev_reset_stat(__u16 dev)
2533{
2534 struct hci_dev *hdev;
2535 int ret = 0;
2536
70f23020
AE
2537 hdev = hci_dev_get(dev);
2538 if (!hdev)
1da177e4
LT
2539 return -ENODEV;
2540
0736cfa8
MH
2541 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2542 ret = -EBUSY;
2543 goto done;
2544 }
2545
fee746b0
MH
2546 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2547 ret = -EOPNOTSUPP;
2548 goto done;
2549 }
2550
1da177e4
LT
2551 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2552
0736cfa8 2553done:
1da177e4 2554 hci_dev_put(hdev);
1da177e4
LT
2555 return ret;
2556}
2557
2558int hci_dev_cmd(unsigned int cmd, void __user *arg)
2559{
2560 struct hci_dev *hdev;
2561 struct hci_dev_req dr;
2562 int err = 0;
2563
2564 if (copy_from_user(&dr, arg, sizeof(dr)))
2565 return -EFAULT;
2566
70f23020
AE
2567 hdev = hci_dev_get(dr.dev_id);
2568 if (!hdev)
1da177e4
LT
2569 return -ENODEV;
2570
0736cfa8
MH
2571 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2572 err = -EBUSY;
2573 goto done;
2574 }
2575
fee746b0
MH
2576 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2577 err = -EOPNOTSUPP;
2578 goto done;
2579 }
2580
5b69bef5
MH
2581 if (hdev->dev_type != HCI_BREDR) {
2582 err = -EOPNOTSUPP;
2583 goto done;
2584 }
2585
56f87901
JH
2586 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2587 err = -EOPNOTSUPP;
2588 goto done;
2589 }
2590
1da177e4
LT
2591 switch (cmd) {
2592 case HCISETAUTH:
01178cd4
JH
2593 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2594 HCI_INIT_TIMEOUT);
1da177e4
LT
2595 break;
2596
2597 case HCISETENCRYPT:
2598 if (!lmp_encrypt_capable(hdev)) {
2599 err = -EOPNOTSUPP;
2600 break;
2601 }
2602
2603 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2604 /* Auth must be enabled first */
01178cd4
JH
2605 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2606 HCI_INIT_TIMEOUT);
1da177e4
LT
2607 if (err)
2608 break;
2609 }
2610
01178cd4
JH
2611 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2612 HCI_INIT_TIMEOUT);
1da177e4
LT
2613 break;
2614
2615 case HCISETSCAN:
01178cd4
JH
2616 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2617 HCI_INIT_TIMEOUT);
1da177e4
LT
2618 break;
2619
1da177e4 2620 case HCISETLINKPOL:
01178cd4
JH
2621 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2622 HCI_INIT_TIMEOUT);
1da177e4
LT
2623 break;
2624
2625 case HCISETLINKMODE:
e4e8e37c
MH
2626 hdev->link_mode = ((__u16) dr.dev_opt) &
2627 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2628 break;
2629
2630 case HCISETPTYPE:
2631 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2632 break;
2633
2634 case HCISETACLMTU:
e4e8e37c
MH
2635 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2636 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2637 break;
2638
2639 case HCISETSCOMTU:
e4e8e37c
MH
2640 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2641 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2642 break;
2643
2644 default:
2645 err = -EINVAL;
2646 break;
2647 }
e4e8e37c 2648
0736cfa8 2649done:
1da177e4
LT
2650 hci_dev_put(hdev);
2651 return err;
2652}
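
The HCISETACLMTU/HCISETSCOMTU cases above read dev_opt as two consecutive __u16 values (packet count first, MTU second), so the packing is host-endian. A hedged sketch for a little-endian host; example_set_acl_mtu is a hypothetical helper.

#include <stdint.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_set_acl_mtu(int ctl, int dev_id, uint16_t mtu, uint16_t pkts)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = ((uint32_t)mtu << 16) | pkts;	/* LE: pkts low, mtu high */

	return ioctl(ctl, HCISETACLMTU, &dr);
}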
2653
2654int hci_get_dev_list(void __user *arg)
2655{
8035ded4 2656 struct hci_dev *hdev;
1da177e4
LT
2657 struct hci_dev_list_req *dl;
2658 struct hci_dev_req *dr;
1da177e4
LT
2659 int n = 0, size, err;
2660 __u16 dev_num;
2661
2662 if (get_user(dev_num, (__u16 __user *) arg))
2663 return -EFAULT;
2664
2665 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2666 return -EINVAL;
2667
2668 size = sizeof(*dl) + dev_num * sizeof(*dr);
2669
70f23020
AE
2670 dl = kzalloc(size, GFP_KERNEL);
2671 if (!dl)
1da177e4
LT
2672 return -ENOMEM;
2673
2674 dr = dl->dev_req;
2675
f20d09d5 2676 read_lock(&hci_dev_list_lock);
8035ded4 2677 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2678 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2679 cancel_delayed_work(&hdev->power_off);
c542a06c 2680
a8b2d5c2
JH
2681 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2682 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2683
1da177e4
LT
2684 (dr + n)->dev_id = hdev->id;
2685 (dr + n)->dev_opt = hdev->flags;
c542a06c 2686
1da177e4
LT
2687 if (++n >= dev_num)
2688 break;
2689 }
f20d09d5 2690 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2691
2692 dl->dev_num = n;
2693 size = sizeof(*dl) + n * sizeof(*dr);
2694
2695 err = copy_to_user(arg, dl, size);
2696 kfree(dl);
2697
2698 return err ? -EFAULT : 0;
2699}
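
A hedged user-space sketch of HCIGETDEVLIST, the ioctl this function implements; dev_num goes in as the capacity and comes back as the number of filled entries. example_list_devs is hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_list_devs(int ctl)
{
	struct hci_dev_list_req *dl;
	int i;

	dl = calloc(1, sizeof(*dl) +
		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl)
		return -1;

	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(ctl, HCIGETDEVLIST, dl) < 0) {
		free(dl);
		return -1;
	}

	for (i = 0; i < dl->dev_num; i++)
		printf("hci%d flags 0x%x\n", dl->dev_req[i].dev_id,
		       (unsigned int)dl->dev_req[i].dev_opt);

	free(dl);
	return 0;
}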
2700
2701int hci_get_dev_info(void __user *arg)
2702{
2703 struct hci_dev *hdev;
2704 struct hci_dev_info di;
2705 int err = 0;
2706
2707 if (copy_from_user(&di, arg, sizeof(di)))
2708 return -EFAULT;
2709
70f23020
AE
2710 hdev = hci_dev_get(di.dev_id);
2711 if (!hdev)
1da177e4
LT
2712 return -ENODEV;
2713
a8b2d5c2 2714 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2715 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2716
a8b2d5c2
JH
2717 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2718 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2719
1da177e4
LT
2720 strcpy(di.name, hdev->name);
2721 di.bdaddr = hdev->bdaddr;
60f2a3ed 2722 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2723 di.flags = hdev->flags;
2724 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2725 if (lmp_bredr_capable(hdev)) {
2726 di.acl_mtu = hdev->acl_mtu;
2727 di.acl_pkts = hdev->acl_pkts;
2728 di.sco_mtu = hdev->sco_mtu;
2729 di.sco_pkts = hdev->sco_pkts;
2730 } else {
2731 di.acl_mtu = hdev->le_mtu;
2732 di.acl_pkts = hdev->le_pkts;
2733 di.sco_mtu = 0;
2734 di.sco_pkts = 0;
2735 }
1da177e4
LT
2736 di.link_policy = hdev->link_policy;
2737 di.link_mode = hdev->link_mode;
2738
2739 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2740 memcpy(&di.features, &hdev->features, sizeof(di.features));
2741
2742 if (copy_to_user(arg, &di, sizeof(di)))
2743 err = -EFAULT;
2744
2745 hci_dev_put(hdev);
2746
2747 return err;
2748}
2749
2750/* ---- Interface to HCI drivers ---- */
2751
611b30f7
MH
2752static int hci_rfkill_set_block(void *data, bool blocked)
2753{
2754 struct hci_dev *hdev = data;
2755
2756 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2757
0736cfa8
MH
2758 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2759 return -EBUSY;
2760
5e130367
JH
2761 if (blocked) {
2762 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2763 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2764 hci_dev_do_close(hdev);
5e130367
JH
2765 } else {
2766 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2767 }
611b30f7
MH
2768
2769 return 0;
2770}
2771
2772static const struct rfkill_ops hci_rfkill_ops = {
2773 .set_block = hci_rfkill_set_block,
2774};
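
This ops table gets wired up during registration; a hedged sketch of that pattern (hci_register_dev does the real wiring later in this file; example_setup_rfkill is hypothetical):

static int example_setup_rfkill(struct hci_dev *hdev)
{
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH,
				    &hci_rfkill_ops, hdev);
	if (!hdev->rfkill)
		return -ENOMEM;

	if (rfkill_register(hdev->rfkill) < 0) {
		rfkill_destroy(hdev->rfkill);
		hdev->rfkill = NULL;
		return -EIO;
	}

	return 0;
}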
2775
ab81cbf9
JH
2776static void hci_power_on(struct work_struct *work)
2777{
2778 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2779 int err;
ab81cbf9
JH
2780
2781 BT_DBG("%s", hdev->name);
2782
cbed0ca1 2783 err = hci_dev_do_open(hdev);
96570ffc
JH
2784 if (err < 0) {
2785 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2786 return;
96570ffc 2787 }
ab81cbf9 2788
a5c8f270
MH
2789 /* During the HCI setup phase, a few error conditions are
2790 * ignored and they need to be checked now. If they are still
2791 * valid, it is important to turn the device back off.
2792 */
2793 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2794 (hdev->dev_type == HCI_BREDR &&
2795 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2796 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2797 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2798 hci_dev_do_close(hdev);
2799 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2800 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2801 HCI_AUTO_OFF_TIMEOUT);
bf543036 2802 }
ab81cbf9 2803
fee746b0
MH
2804 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2805 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2806 mgmt_index_added(hdev);
2807 }
ab81cbf9
JH
2808}
2809
2810static void hci_power_off(struct work_struct *work)
2811{
3243553f 2812 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2813 power_off.work);
ab81cbf9
JH
2814
2815 BT_DBG("%s", hdev->name);
2816
8ee56540 2817 hci_dev_do_close(hdev);
ab81cbf9
JH
2818}
2819
16ab91ab
JH
2820static void hci_discov_off(struct work_struct *work)
2821{
2822 struct hci_dev *hdev;
16ab91ab
JH
2823
2824 hdev = container_of(work, struct hci_dev, discov_off.work);
2825
2826 BT_DBG("%s", hdev->name);
2827
d1967ff8 2828 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2829}
2830
35f7498a 2831void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2832{
4821002c 2833 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2834
4821002c
JH
2835 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2836 list_del(&uuid->list);
2aeb9a1a
JH
2837 kfree(uuid);
2838 }
2aeb9a1a
JH
2839}
2840
35f7498a 2841void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2842{
2843 struct list_head *p, *n;
2844
2845 list_for_each_safe(p, n, &hdev->link_keys) {
2846 struct link_key *key;
2847
2848 key = list_entry(p, struct link_key, list);
2849
2850 list_del(p);
2851 kfree(key);
2852 }
55ed8ca1
JH
2853}
2854
35f7498a 2855void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2856{
2857 struct smp_ltk *k, *tmp;
2858
2859 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2860 list_del(&k->list);
2861 kfree(k);
2862 }
b899efaf
VCG
2863}
2864
970c4e46
JH
2865void hci_smp_irks_clear(struct hci_dev *hdev)
2866{
2867 struct smp_irk *k, *tmp;
2868
2869 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2870 list_del(&k->list);
2871 kfree(k);
2872 }
2873}
2874
55ed8ca1
JH
2875struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2876{
8035ded4 2877 struct link_key *k;
55ed8ca1 2878
8035ded4 2879 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2880 if (bacmp(bdaddr, &k->bdaddr) == 0)
2881 return k;
55ed8ca1
JH
2882
2883 return NULL;
2884}
2885
745c0ce3 2886static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2887 u8 key_type, u8 old_key_type)
d25e28ab
JH
2888{
2889 /* Legacy key */
2890 if (key_type < 0x03)
745c0ce3 2891 return true;
d25e28ab
JH
2892
2893 /* Debug keys are insecure so don't store them persistently */
2894 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2895 return false;
d25e28ab
JH
2896
2897 /* Changed combination key and there's no previous one */
2898 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2899 return false;
d25e28ab
JH
2900
2901 /* Security mode 3 case */
2902 if (!conn)
745c0ce3 2903 return true;
d25e28ab
JH
2904
 2905 /* Neither the local nor the remote side requested no-bonding */
2906 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2907 return true;
d25e28ab
JH
2908
2909 /* Local side had dedicated bonding as requirement */
2910 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2911 return true;
d25e28ab
JH
2912
2913 /* Remote side had dedicated bonding as requirement */
2914 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2915 return true;
d25e28ab
JH
2916
2917 /* If none of the above criteria match, then don't store the key
2918 * persistently */
745c0ce3 2919 return false;
d25e28ab
JH
2920}
2921
98a0b845
JH
2922static bool ltk_type_master(u8 type)
2923{
d97c9fb0 2924 return (type == SMP_LTK);
98a0b845
JH
2925}
2926
fe39c7b2 2927struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2928 bool master)
75d262c2 2929{
c9839a11 2930 struct smp_ltk *k;
75d262c2 2931
c9839a11 2932 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2933 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2934 continue;
2935
98a0b845
JH
2936 if (ltk_type_master(k->type) != master)
2937 continue;
2938
c9839a11 2939 return k;
75d262c2
VCG
2940 }
2941
2942 return NULL;
2943}
75d262c2 2944
c9839a11 2945struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2946 u8 addr_type, bool master)
75d262c2 2947{
c9839a11 2948 struct smp_ltk *k;
75d262c2 2949
c9839a11
VCG
2950 list_for_each_entry(k, &hdev->long_term_keys, list)
2951 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2952 bacmp(bdaddr, &k->bdaddr) == 0 &&
2953 ltk_type_master(k->type) == master)
75d262c2
VCG
2954 return k;
2955
2956 return NULL;
2957}
75d262c2 2958
970c4e46
JH
2959struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2960{
2961 struct smp_irk *irk;
2962
2963 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2964 if (!bacmp(&irk->rpa, rpa))
2965 return irk;
2966 }
2967
2968 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2969 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2970 bacpy(&irk->rpa, rpa);
2971 return irk;
2972 }
2973 }
2974
2975 return NULL;
2976}
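
A short note on the lookup strategy above:

/* Two-pass lookup: first try RPAs that were already resolved and
 * cached in irk->rpa; only then run the AES-based smp_irk_matches()
 * test against every stored IRK, caching the RPA on a hit so the
 * next lookup takes the cheap path.
 */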
2977
2978struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2979 u8 addr_type)
2980{
2981 struct smp_irk *irk;
2982
6cfc9988
JH
2983 /* Identity Address must be public or static random */
2984 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2985 return NULL;
2986
970c4e46
JH
2987 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2988 if (addr_type == irk->addr_type &&
2989 bacmp(bdaddr, &irk->bdaddr) == 0)
2990 return irk;
2991 }
2992
2993 return NULL;
2994}
2995
567fa2aa 2996struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2997 bdaddr_t *bdaddr, u8 *val, u8 type,
2998 u8 pin_len, bool *persistent)
55ed8ca1
JH
2999{
3000 struct link_key *key, *old_key;
745c0ce3 3001 u8 old_key_type;
55ed8ca1
JH
3002
3003 old_key = hci_find_link_key(hdev, bdaddr);
3004 if (old_key) {
3005 old_key_type = old_key->type;
3006 key = old_key;
3007 } else {
12adcf3a 3008 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3009 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3010 if (!key)
567fa2aa 3011 return NULL;
55ed8ca1
JH
3012 list_add(&key->list, &hdev->link_keys);
3013 }
3014
6ed93dc6 3015 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3016
d25e28ab
JH
3017 /* Some buggy controller combinations generate a changed
3018 * combination key for legacy pairing even when there's no
3019 * previous key */
3020 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3021 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3022 type = HCI_LK_COMBINATION;
655fe6ec
JH
3023 if (conn)
3024 conn->key_type = type;
3025 }
d25e28ab 3026
55ed8ca1 3027 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3028 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3029 key->pin_len = pin_len;
3030
b6020ba0 3031 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3032 key->type = old_key_type;
4748fed2
JH
3033 else
3034 key->type = type;
3035
7652ff6a
JH
3036 if (persistent)
3037 *persistent = hci_persistent_key(hdev, conn, type,
3038 old_key_type);
55ed8ca1 3039
567fa2aa 3040 return key;
55ed8ca1
JH
3041}
3042
ca9142b8 3043struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3044 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3045 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3046{
c9839a11 3047 struct smp_ltk *key, *old_key;
98a0b845 3048 bool master = ltk_type_master(type);
75d262c2 3049
98a0b845 3050 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3051 if (old_key)
75d262c2 3052 key = old_key;
c9839a11 3053 else {
0a14ab41 3054 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3055 if (!key)
ca9142b8 3056 return NULL;
c9839a11 3057 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3058 }
3059
75d262c2 3060 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3061 key->bdaddr_type = addr_type;
3062 memcpy(key->val, tk, sizeof(key->val));
3063 key->authenticated = authenticated;
3064 key->ediv = ediv;
fe39c7b2 3065 key->rand = rand;
c9839a11
VCG
3066 key->enc_size = enc_size;
3067 key->type = type;
75d262c2 3068
ca9142b8 3069 return key;
75d262c2
VCG
3070}
3071
ca9142b8
JH
3072struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3073 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3074{
3075 struct smp_irk *irk;
3076
3077 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3078 if (!irk) {
3079 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3080 if (!irk)
ca9142b8 3081 return NULL;
970c4e46
JH
3082
3083 bacpy(&irk->bdaddr, bdaddr);
3084 irk->addr_type = addr_type;
3085
3086 list_add(&irk->list, &hdev->identity_resolving_keys);
3087 }
3088
3089 memcpy(irk->val, val, 16);
3090 bacpy(&irk->rpa, rpa);
3091
ca9142b8 3092 return irk;
970c4e46
JH
3093}
3094
55ed8ca1
JH
3095int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3096{
3097 struct link_key *key;
3098
3099 key = hci_find_link_key(hdev, bdaddr);
3100 if (!key)
3101 return -ENOENT;
3102
6ed93dc6 3103 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3104
3105 list_del(&key->list);
3106 kfree(key);
3107
3108 return 0;
3109}
3110
e0b2b27e 3111int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3112{
3113 struct smp_ltk *k, *tmp;
c51ffa0b 3114 int removed = 0;
b899efaf
VCG
3115
3116 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3117 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3118 continue;
3119
6ed93dc6 3120 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3121
3122 list_del(&k->list);
3123 kfree(k);
c51ffa0b 3124 removed++;
b899efaf
VCG
3125 }
3126
c51ffa0b 3127 return removed ? 0 : -ENOENT;
b899efaf
VCG
3128}
3129
a7ec7338
JH
3130void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3131{
3132 struct smp_irk *k, *tmp;
3133
668b7b19 3134 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3135 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3136 continue;
3137
3138 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3139
3140 list_del(&k->list);
3141 kfree(k);
3142 }
3143}
3144
6bd32326 3145/* HCI command timer function */
65cc2b49 3146static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3147{
65cc2b49
MH
3148 struct hci_dev *hdev = container_of(work, struct hci_dev,
3149 cmd_timer.work);
6bd32326 3150
bda4f23a
AE
3151 if (hdev->sent_cmd) {
3152 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3153 u16 opcode = __le16_to_cpu(sent->opcode);
3154
3155 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3156 } else {
3157 BT_ERR("%s command tx timeout", hdev->name);
3158 }
3159
6bd32326 3160 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3161 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3162}
3163
2763eda6 3164struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3165 bdaddr_t *bdaddr)
2763eda6
SJ
3166{
3167 struct oob_data *data;
3168
3169 list_for_each_entry(data, &hdev->remote_oob_data, list)
3170 if (bacmp(bdaddr, &data->bdaddr) == 0)
3171 return data;
3172
3173 return NULL;
3174}
3175
3176int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3177{
3178 struct oob_data *data;
3179
3180 data = hci_find_remote_oob_data(hdev, bdaddr);
3181 if (!data)
3182 return -ENOENT;
3183
6ed93dc6 3184 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3185
3186 list_del(&data->list);
3187 kfree(data);
3188
3189 return 0;
3190}
3191
35f7498a 3192void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3193{
3194 struct oob_data *data, *n;
3195
3196 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3197 list_del(&data->list);
3198 kfree(data);
3199 }
2763eda6
SJ
3200}
3201
0798872e
MH
3202int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3203 u8 *hash, u8 *randomizer)
2763eda6
SJ
3204{
3205 struct oob_data *data;
3206
3207 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3208 if (!data) {
0a14ab41 3209 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3210 if (!data)
3211 return -ENOMEM;
3212
3213 bacpy(&data->bdaddr, bdaddr);
3214 list_add(&data->list, &hdev->remote_oob_data);
3215 }
3216
519ca9d0
MH
3217 memcpy(data->hash192, hash, sizeof(data->hash192));
3218 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3219
0798872e
MH
3220 memset(data->hash256, 0, sizeof(data->hash256));
3221 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3222
3223 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3224
3225 return 0;
3226}
3227
3228int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3229 u8 *hash192, u8 *randomizer192,
3230 u8 *hash256, u8 *randomizer256)
3231{
3232 struct oob_data *data;
3233
3234 data = hci_find_remote_oob_data(hdev, bdaddr);
3235 if (!data) {
0a14ab41 3236 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3237 if (!data)
3238 return -ENOMEM;
3239
3240 bacpy(&data->bdaddr, bdaddr);
3241 list_add(&data->list, &hdev->remote_oob_data);
3242 }
3243
3244 memcpy(data->hash192, hash192, sizeof(data->hash192));
3245 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3246
3247 memcpy(data->hash256, hash256, sizeof(data->hash256));
3248 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3249
6ed93dc6 3250 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3251
3252 return 0;
3253}
3254
b9ee0a78
MH
3255struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3256 bdaddr_t *bdaddr, u8 type)
b2a66aad 3257{
8035ded4 3258 struct bdaddr_list *b;
b2a66aad 3259
b9ee0a78
MH
3260 list_for_each_entry(b, &hdev->blacklist, list) {
3261 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3262 return b;
b9ee0a78 3263 }
b2a66aad
AJ
3264
3265 return NULL;
3266}
3267
c9507490 3268static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3269{
3270 struct list_head *p, *n;
3271
3272 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3273 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3274
3275 list_del(p);
3276 kfree(b);
3277 }
b2a66aad
AJ
3278}
3279
88c1fe4b 3280int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3281{
3282 struct bdaddr_list *entry;
b2a66aad 3283
b9ee0a78 3284 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3285 return -EBADF;
3286
b9ee0a78 3287 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3288 return -EEXIST;
b2a66aad
AJ
3289
3290 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3291 if (!entry)
3292 return -ENOMEM;
b2a66aad
AJ
3293
3294 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3295 entry->bdaddr_type = type;
b2a66aad
AJ
3296
3297 list_add(&entry->list, &hdev->blacklist);
3298
2a8357f2 3299 return 0;
b2a66aad
AJ
3300}
3301
88c1fe4b 3302int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3303{
3304 struct bdaddr_list *entry;
b2a66aad 3305
35f7498a
JH
3306 if (!bacmp(bdaddr, BDADDR_ANY)) {
3307 hci_blacklist_clear(hdev);
3308 return 0;
3309 }
b2a66aad 3310
b9ee0a78 3311 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3312 if (!entry)
5e762444 3313 return -ENOENT;
b2a66aad
AJ
3314
3315 list_del(&entry->list);
3316 kfree(entry);
3317
2a8357f2 3318 return 0;
b2a66aad
AJ
3319}
3320
d2ab0ac1
MH
3321struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3322 bdaddr_t *bdaddr, u8 type)
3323{
3324 struct bdaddr_list *b;
3325
3326 list_for_each_entry(b, &hdev->le_white_list, list) {
3327 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3328 return b;
3329 }
3330
3331 return NULL;
3332}
3333
3334void hci_white_list_clear(struct hci_dev *hdev)
3335{
3336 struct list_head *p, *n;
3337
3338 list_for_each_safe(p, n, &hdev->le_white_list) {
3339 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3340
3341 list_del(p);
3342 kfree(b);
3343 }
3344}
3345
3346int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3347{
3348 struct bdaddr_list *entry;
3349
3350 if (!bacmp(bdaddr, BDADDR_ANY))
3351 return -EBADF;
3352
3353 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3354 if (!entry)
3355 return -ENOMEM;
3356
3357 bacpy(&entry->bdaddr, bdaddr);
3358 entry->bdaddr_type = type;
3359
3360 list_add(&entry->list, &hdev->le_white_list);
3361
3362 return 0;
3363}
3364
3365int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3366{
3367 struct bdaddr_list *entry;
3368
3369 if (!bacmp(bdaddr, BDADDR_ANY))
3370 return -EBADF;
3371
3372 entry = hci_white_list_lookup(hdev, bdaddr, type);
3373 if (!entry)
3374 return -ENOENT;
3375
3376 list_del(&entry->list);
3377 kfree(entry);
3378
3379 return 0;
3380}
3381
15819a70
AG
3382/* This function requires the caller holds hdev->lock */
3383struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3384 bdaddr_t *addr, u8 addr_type)
3385{
3386 struct hci_conn_params *params;
3387
3388 list_for_each_entry(params, &hdev->le_conn_params, list) {
3389 if (bacmp(&params->addr, addr) == 0 &&
3390 params->addr_type == addr_type) {
3391 return params;
3392 }
3393 }
3394
3395 return NULL;
3396}
3397
cef952ce
AG
3398static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3399{
3400 struct hci_conn *conn;
3401
3402 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3403 if (!conn)
3404 return false;
3405
3406 if (conn->dst_type != type)
3407 return false;
3408
3409 if (conn->state != BT_CONNECTED)
3410 return false;
3411
3412 return true;
3413}
3414
a9b0a04c
AG
3415static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3416{
3417 if (addr_type == ADDR_LE_DEV_PUBLIC)
3418 return true;
3419
3420 /* Check for Random Static address type */
3421 if ((addr->b[5] & 0xc0) == 0xc0)
3422 return true;
3423
3424 return false;
3425}
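
Illustrative classifications for the check above (addresses considered MSB first; bdaddr_t stores bytes little-endian, so b[5] is the most significant byte):

/* Examples:
 *   ADDR_LE_DEV_PUBLIC, any value          -> identity address
 *   ADDR_LE_DEV_RANDOM, top bits 11 (0xc0) -> static random, identity
 *   ADDR_LE_DEV_RANDOM, top bits 01        -> resolvable private, not one
 *   ADDR_LE_DEV_RANDOM, top bits 00        -> non-resolvable, not one
 */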
3426
4b10966f
MH
3427/* This function requires the caller holds hdev->lock */
3428struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3429 bdaddr_t *addr, u8 addr_type)
3430{
3431 struct bdaddr_list *entry;
3432
3433 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3434 if (bacmp(&entry->bdaddr, addr) == 0 &&
3435 entry->bdaddr_type == addr_type)
3436 return entry;
3437 }
3438
3439 return NULL;
3440}
3441
3442/* This function requires the caller holds hdev->lock */
3443void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3444{
3445 struct bdaddr_list *entry;
3446
3447 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3448 if (entry)
3449 goto done;
3450
3451 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3452 if (!entry) {
3453 BT_ERR("Out of memory");
3454 return;
3455 }
3456
3457 bacpy(&entry->bdaddr, addr);
3458 entry->bdaddr_type = addr_type;
3459
3460 list_add(&entry->list, &hdev->pend_le_conns);
3461
3462 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3463
3464done:
3465 hci_update_background_scan(hdev);
3466}
3467
3468/* This function requires the caller holds hdev->lock */
3469void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3470{
3471 struct bdaddr_list *entry;
3472
3473 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3474 if (!entry)
3475 goto done;
3476
3477 list_del(&entry->list);
3478 kfree(entry);
3479
3480 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3481
3482done:
3483 hci_update_background_scan(hdev);
3484}
3485
3486/* This function requires the caller holds hdev->lock */
3487void hci_pend_le_conns_clear(struct hci_dev *hdev)
3488{
3489 struct bdaddr_list *entry, *tmp;
3490
3491 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3492 list_del(&entry->list);
3493 kfree(entry);
3494 }
3495
3496 BT_DBG("All LE pending connections cleared");
1c1697c0
MH
3497
3498 hci_update_background_scan(hdev);
4b10966f
MH
3499}
3500
15819a70 3501/* This function requires the caller holds hdev->lock */
51d167c0
MH
3502struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3503 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3504{
3505 struct hci_conn_params *params;
3506
3507 if (!is_identity_address(addr, addr_type))
51d167c0 3508 return NULL;
bf5b3c8b
MH
3509
3510 params = hci_conn_params_lookup(hdev, addr, addr_type);
3511 if (params)
51d167c0 3512 return params;
bf5b3c8b
MH
3513
3514 params = kzalloc(sizeof(*params), GFP_KERNEL);
3515 if (!params) {
3516 BT_ERR("Out of memory");
51d167c0 3517 return NULL;
bf5b3c8b
MH
3518 }
3519
3520 bacpy(&params->addr, addr);
3521 params->addr_type = addr_type;
3522
3523 list_add(&params->list, &hdev->le_conn_params);
3524
3525 params->conn_min_interval = hdev->le_conn_min_interval;
3526 params->conn_max_interval = hdev->le_conn_max_interval;
3527 params->conn_latency = hdev->le_conn_latency;
3528 params->supervision_timeout = hdev->le_supv_timeout;
3529 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3530
3531 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3532
51d167c0 3533 return params;
bf5b3c8b
MH
3534}
3535
3536/* This function requires the caller holds hdev->lock */
3537int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3538 u8 auto_connect)
15819a70
AG
3539{
3540 struct hci_conn_params *params;
3541
8c87aae1
MH
3542 params = hci_conn_params_add(hdev, addr, addr_type);
3543 if (!params)
3544 return -EIO;
cef952ce 3545
9fcb18ef 3546 params->auto_connect = auto_connect;
15819a70 3547
cef952ce
AG
3548 switch (auto_connect) {
3549 case HCI_AUTO_CONN_DISABLED:
3550 case HCI_AUTO_CONN_LINK_LOSS:
3551 hci_pend_le_conn_del(hdev, addr, addr_type);
3552 break;
3553 case HCI_AUTO_CONN_ALWAYS:
3554 if (!is_connected(hdev, addr, addr_type))
3555 hci_pend_le_conn_add(hdev, addr, addr_type);
3556 break;
3557 }
15819a70 3558
d06b50ce
MH
3559 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3560 auto_connect);
a9b0a04c
AG
3561
3562 return 0;
15819a70
AG
3563}
3564
3565/* This function requires the caller holds hdev->lock */
3566void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3567{
3568 struct hci_conn_params *params;
3569
3570 params = hci_conn_params_lookup(hdev, addr, addr_type);
3571 if (!params)
3572 return;
3573
cef952ce
AG
3574 hci_pend_le_conn_del(hdev, addr, addr_type);
3575
15819a70
AG
3576 list_del(&params->list);
3577 kfree(params);
3578
3579 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3580}
3581
3582/* This function requires the caller holds hdev->lock */
3583void hci_conn_params_clear(struct hci_dev *hdev)
3584{
3585 struct hci_conn_params *params, *tmp;
3586
3587 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3588 list_del(&params->list);
3589 kfree(params);
3590 }
3591
1089b67d
MH
3592 hci_pend_le_conns_clear(hdev);
3593
15819a70
AG
3594 BT_DBG("All LE connection parameters were removed");
3595}
3596
4c87eaab 3597static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3598{
4c87eaab
AG
3599 if (status) {
3600 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3601
4c87eaab
AG
3602 hci_dev_lock(hdev);
3603 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3604 hci_dev_unlock(hdev);
3605 return;
3606 }
7ba8b4be
AG
3607}
3608
4c87eaab 3609static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3610{
4c87eaab
AG
3611 /* General inquiry access code (GIAC) */
3612 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3613 struct hci_request req;
3614 struct hci_cp_inquiry cp;
7ba8b4be
AG
3615 int err;
3616
4c87eaab
AG
3617 if (status) {
3618 BT_ERR("Failed to disable LE scanning: status %d", status);
3619 return;
3620 }
7ba8b4be 3621
4c87eaab
AG
3622 switch (hdev->discovery.type) {
3623 case DISCOV_TYPE_LE:
3624 hci_dev_lock(hdev);
3625 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3626 hci_dev_unlock(hdev);
3627 break;
7ba8b4be 3628
4c87eaab
AG
3629 case DISCOV_TYPE_INTERLEAVED:
3630 hci_req_init(&req, hdev);
7ba8b4be 3631
4c87eaab
AG
3632 memset(&cp, 0, sizeof(cp));
3633 memcpy(&cp.lap, lap, sizeof(cp.lap));
3634 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3635 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3636
4c87eaab 3637 hci_dev_lock(hdev);
7dbfac1d 3638
4c87eaab 3639 hci_inquiry_cache_flush(hdev);
7dbfac1d 3640
4c87eaab
AG
3641 err = hci_req_run(&req, inquiry_complete);
3642 if (err) {
3643 BT_ERR("Inquiry request failed: err %d", err);
3644 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3645 }
7dbfac1d 3646
4c87eaab
AG
3647 hci_dev_unlock(hdev);
3648 break;
7dbfac1d 3649 }
7dbfac1d
AG
3650}
3651
7ba8b4be
AG
3652static void le_scan_disable_work(struct work_struct *work)
3653{
3654 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3655 le_scan_disable.work);
4c87eaab
AG
3656 struct hci_request req;
3657 int err;
7ba8b4be
AG
3658
3659 BT_DBG("%s", hdev->name);
3660
4c87eaab 3661 hci_req_init(&req, hdev);
28b75a89 3662
b1efcc28 3663 hci_req_add_le_scan_disable(&req);
28b75a89 3664
4c87eaab
AG
3665 err = hci_req_run(&req, le_scan_disable_work_complete);
3666 if (err)
3667 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3668}
3669
8d97250e
JH
3670static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3671{
3672 struct hci_dev *hdev = req->hdev;
3673
3674 /* If we're advertising or initiating an LE connection we can't
3675 * go ahead and change the random address at this time. This is
3676 * because the eventual initiator address used for the
3677 * subsequently created connection will be undefined (some
3678 * controllers use the new address and others the one we had
3679 * when the operation started).
3680 *
3681 * In this kind of scenario skip the update and let the random
3682 * address be updated at the next cycle.
3683 */
3684 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3685 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3686 BT_DBG("Deferring random address update");
3687 return;
3688 }
3689
3690 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3691}
3692
94b1fc92
MH
3693int hci_update_random_address(struct hci_request *req, bool require_privacy,
3694 u8 *own_addr_type)
ebd3a747
JH
3695{
3696 struct hci_dev *hdev = req->hdev;
3697 int err;
3698
3699 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3700 * current RPA has expired or there is something else than
3701 * the current RPA in use, then generate a new one.
ebd3a747
JH
3702 */
3703 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3704 int to;
3705
3706 *own_addr_type = ADDR_LE_DEV_RANDOM;
3707
3708 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3709 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3710 return 0;
3711
2b5224dc 3712 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3713 if (err < 0) {
3714 BT_ERR("%s failed to generate new RPA", hdev->name);
3715 return err;
3716 }
3717
8d97250e 3718 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3719
3720 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3721 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3722
3723 return 0;
94b1fc92
MH
3724 }
3725
 3726 /* If privacy is required without a resolvable private address,
 3727 * use a non-resolvable private address. This is useful for active
3728 * scanning and non-connectable advertising.
3729 */
3730 if (require_privacy) {
3731 bdaddr_t urpa;
3732
3733 get_random_bytes(&urpa, 6);
3734 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3735
3736 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3737 set_random_addr(req, &urpa);
94b1fc92 3738 return 0;
ebd3a747
JH
3739 }
3740
3741 /* If forcing static address is in use or there is no public
 3742 * address, use the static address as the random address (but skip
 3743 * the HCI command if the current random address is already the
 3744 * static one).
3745 */
111902f7 3746 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3747 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3748 *own_addr_type = ADDR_LE_DEV_RANDOM;
3749 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3750 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3751 &hdev->static_addr);
3752 return 0;
3753 }
3754
3755 /* Neither privacy nor static address is being used so use a
3756 * public address.
3757 */
3758 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3759
3760 return 0;
3761}
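
The decision order above, summarized:

/* Summary (illustrative):
 *   1. HCI_PRIVACY set       -> RPA, regenerated when expired or stale
 *   2. require_privacy       -> fresh non-resolvable private address
 *   3. forced static address
 *      or no public BD_ADDR  -> static random address
 *   4. otherwise             -> public address
 */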
3762
a1f4c318
JH
3763/* Copy the Identity Address of the controller.
3764 *
3765 * If the controller has a public BD_ADDR, then by default use that one.
3766 * If this is a LE only controller without a public address, default to
3767 * the static random address.
3768 *
3769 * For debugging purposes it is possible to force controllers with a
3770 * public address to use the static random address instead.
3771 */
3772void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3773 u8 *bdaddr_type)
3774{
111902f7 3775 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3776 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3777 bacpy(bdaddr, &hdev->static_addr);
3778 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3779 } else {
3780 bacpy(bdaddr, &hdev->bdaddr);
3781 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3782 }
3783}
3784
9be0dab7
DH
3785/* Alloc HCI device */
3786struct hci_dev *hci_alloc_dev(void)
3787{
3788 struct hci_dev *hdev;
3789
3790 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3791 if (!hdev)
3792 return NULL;
3793
b1b813d4
DH
3794 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3795 hdev->esco_type = (ESCO_HV1);
3796 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3797 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3798 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3799 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3800 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3801 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3802
b1b813d4
DH
3803 hdev->sniff_max_interval = 800;
3804 hdev->sniff_min_interval = 80;
3805
3f959d46 3806 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3807 hdev->le_scan_interval = 0x0060;
3808 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3809 hdev->le_conn_min_interval = 0x0028;
3810 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3811 hdev->le_conn_latency = 0x0000;
3812 hdev->le_supv_timeout = 0x002a;
bef64738 3813
d6bfd59c 3814 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3815 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3816 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3817 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3818
b1b813d4
DH
3819 mutex_init(&hdev->lock);
3820 mutex_init(&hdev->req_lock);
3821
3822 INIT_LIST_HEAD(&hdev->mgmt_pending);
3823 INIT_LIST_HEAD(&hdev->blacklist);
3824 INIT_LIST_HEAD(&hdev->uuids);
3825 INIT_LIST_HEAD(&hdev->link_keys);
3826 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3827 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3828 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3829 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3830 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3831 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3832 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3833
3834 INIT_WORK(&hdev->rx_work, hci_rx_work);
3835 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3836 INIT_WORK(&hdev->tx_work, hci_tx_work);
3837 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3838
b1b813d4
DH
3839 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3840 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3841 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3842
b1b813d4
DH
3843 skb_queue_head_init(&hdev->rx_q);
3844 skb_queue_head_init(&hdev->cmd_q);
3845 skb_queue_head_init(&hdev->raw_q);
3846
3847 init_waitqueue_head(&hdev->req_wait_q);
3848
65cc2b49 3849 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3850
b1b813d4
DH
3851 hci_init_sysfs(hdev);
3852 discovery_init(hdev);
9be0dab7
DH
3853
3854 return hdev;
3855}
3856EXPORT_SYMBOL(hci_alloc_dev);
3857
3858/* Free HCI device */
3859void hci_free_dev(struct hci_dev *hdev)
3860{
9be0dab7
DH
3861 /* will be freed via the device release callback */
3862 put_device(&hdev->dev);
3863}
3864EXPORT_SYMBOL(hci_free_dev);
3865
1da177e4
LT
3866/* Register HCI device */
3867int hci_register_dev(struct hci_dev *hdev)
3868{
b1b813d4 3869 int id, error;
1da177e4 3870
010666a1 3871 if (!hdev->open || !hdev->close)
1da177e4
LT
3872 return -EINVAL;
3873
08add513
MM
3874 /* Do not allow HCI_AMP devices to register at index 0,
3875 * so the index can be used as the AMP controller ID.
3876 */
3df92b31
SL
3877 switch (hdev->dev_type) {
3878 case HCI_BREDR:
3879 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3880 break;
3881 case HCI_AMP:
3882 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3883 break;
3884 default:
3885 return -EINVAL;
1da177e4 3886 }
8e87d142 3887
3df92b31
SL
3888 if (id < 0)
3889 return id;
3890
1da177e4
LT
3891 sprintf(hdev->name, "hci%d", id);
3892 hdev->id = id;
2d8b3a11
AE
3893
3894 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3895
d8537548
KC
3896 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3897 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3898 if (!hdev->workqueue) {
3899 error = -ENOMEM;
3900 goto err;
3901 }
f48fd9c8 3902
d8537548
KC
3903 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3904 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3905 if (!hdev->req_workqueue) {
3906 destroy_workqueue(hdev->workqueue);
3907 error = -ENOMEM;
3908 goto err;
3909 }
3910
0153e2ec
MH
3911 if (!IS_ERR_OR_NULL(bt_debugfs))
3912 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3913
bdc3e0f1
MH
3914 dev_set_name(&hdev->dev, "%s", hdev->name);
3915
99780a7b
JH
3916 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3917 CRYPTO_ALG_ASYNC);
3918 if (IS_ERR(hdev->tfm_aes)) {
3919 BT_ERR("Unable to create crypto context");
3920 error = PTR_ERR(hdev->tfm_aes);
3921 hdev->tfm_aes = NULL;
3922 goto err_wqueue;
3923 }
3924
bdc3e0f1 3925 error = device_add(&hdev->dev);
33ca954d 3926 if (error < 0)
99780a7b 3927 goto err_tfm;
1da177e4 3928
611b30f7 3929 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3930 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3931 hdev);
611b30f7
MH
3932 if (hdev->rfkill) {
3933 if (rfkill_register(hdev->rfkill) < 0) {
3934 rfkill_destroy(hdev->rfkill);
3935 hdev->rfkill = NULL;
3936 }
3937 }
3938
5e130367
JH
3939 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3940 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3941
a8b2d5c2 3942 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3943 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3944
01cd3404 3945 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3946 /* Assume BR/EDR support until proven otherwise (such as
3947 * through reading supported features during init.
3948 */
3949 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3950 }
ce2be9ac 3951
fcee3377
GP
3952 write_lock(&hci_dev_list_lock);
3953 list_add(&hdev->list, &hci_dev_list);
3954 write_unlock(&hci_dev_list_lock);
3955
fee746b0
MH
3956 /* Devices that are marked for raw-only usage need to set
3957 * the HCI_RAW flag to indicate that only the user channel is
3958 * supported.
3959 */
3960 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3961 set_bit(HCI_RAW, &hdev->flags);
3962
1da177e4 3963 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3964 hci_dev_hold(hdev);
1da177e4 3965
19202573 3966 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3967
1da177e4 3968 return id;
f48fd9c8 3969
99780a7b
JH
3970err_tfm:
3971 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3972err_wqueue:
3973 destroy_workqueue(hdev->workqueue);
6ead1bbc 3974 destroy_workqueue(hdev->req_workqueue);
33ca954d 3975err:
3df92b31 3976 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3977
33ca954d 3978 return error;
1da177e4
LT
3979}
3980EXPORT_SYMBOL(hci_register_dev);
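/* Editorial sketch, not part of the original file: the canonical
 * transport-driver pattern around hci_alloc_dev()/hci_register_dev().
 * The my_open/my_close/my_send callbacks are hypothetical driver hooks.
 */
static int example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}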
3981
3982/* Unregister HCI device */
59735631 3983void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3984{
3df92b31 3985 int i, id;
ef222013 3986
c13854ce 3987 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3988
94324962
JH
3989 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3990
3df92b31
SL
3991 id = hdev->id;
3992
f20d09d5 3993 write_lock(&hci_dev_list_lock);
1da177e4 3994 list_del(&hdev->list);
f20d09d5 3995 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3996
3997 hci_dev_do_close(hdev);
3998
cd4c5391 3999 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4000 kfree_skb(hdev->reassembly[i]);
4001
b9b5ef18
GP
4002 cancel_work_sync(&hdev->power_on);
4003
ab81cbf9 4004 if (!test_bit(HCI_INIT, &hdev->flags) &&
fee746b0
MH
4005 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4006 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
09fd0de5 4007 hci_dev_lock(hdev);
744cf19e 4008 mgmt_index_removed(hdev);
09fd0de5 4009 hci_dev_unlock(hdev);
56e5cb86 4010 }
ab81cbf9 4011
2e58ef3e
JH
4012 /* mgmt_index_removed should take care of emptying the
4013 * pending list */
4014 BUG_ON(!list_empty(&hdev->mgmt_pending));
4015
1da177e4
LT
4016 hci_notify(hdev, HCI_DEV_UNREG);
4017
611b30f7
MH
4018 if (hdev->rfkill) {
4019 rfkill_unregister(hdev->rfkill);
4020 rfkill_destroy(hdev->rfkill);
4021 }
4022
99780a7b
JH
4023 if (hdev->tfm_aes)
4024 crypto_free_blkcipher(hdev->tfm_aes);
4025
bdc3e0f1 4026 device_del(&hdev->dev);
147e2d59 4027
0153e2ec
MH
4028 debugfs_remove_recursive(hdev->debugfs);
4029
f48fd9c8 4030 destroy_workqueue(hdev->workqueue);
6ead1bbc 4031 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4032
09fd0de5 4033 hci_dev_lock(hdev);
e2e0cacb 4034 hci_blacklist_clear(hdev);
2aeb9a1a 4035 hci_uuids_clear(hdev);
55ed8ca1 4036 hci_link_keys_clear(hdev);
b899efaf 4037 hci_smp_ltks_clear(hdev);
970c4e46 4038 hci_smp_irks_clear(hdev);
2763eda6 4039 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4040 hci_white_list_clear(hdev);
15819a70 4041 hci_conn_params_clear(hdev);
09fd0de5 4042 hci_dev_unlock(hdev);
e2e0cacb 4043
dc946bd8 4044 hci_dev_put(hdev);
3df92b31
SL
4045
4046 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4047}
4048EXPORT_SYMBOL(hci_unregister_dev);
4049
4050/* Suspend HCI device */
4051int hci_suspend_dev(struct hci_dev *hdev)
4052{
4053 hci_notify(hdev, HCI_DEV_SUSPEND);
4054 return 0;
4055}
4056EXPORT_SYMBOL(hci_suspend_dev);
4057
4058/* Resume HCI device */
4059int hci_resume_dev(struct hci_dev *hdev)
4060{
4061 hci_notify(hdev, HCI_DEV_RESUME);
4062 return 0;
4063}
4064EXPORT_SYMBOL(hci_resume_dev);
4065
76bca880 4066/* Receive frame from HCI drivers */
e1a26170 4067int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4068{
76bca880 4069 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4070 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4071 kfree_skb(skb);
4072 return -ENXIO;
4073 }
4074
d82603c6 4075 /* Incoming skb */
76bca880
MH
4076 bt_cb(skb)->incoming = 1;
4077
4078 /* Time stamp */
4079 __net_timestamp(skb);
4080
76bca880 4081 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4082 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4083
76bca880
MH
4084 return 0;
4085}
4086EXPORT_SYMBOL(hci_recv_frame);
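/* Editorial sketch, not part of the original file: a driver that
 * receives a complete HCI packet wraps it in an skb, tags the packet
 * type and hands it to the core. The data/count buffer is hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int count)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, count), data, count);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}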
4087
33e882a5 4088static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4089 int count, __u8 index)
33e882a5
SS
4090{
4091 int len = 0;
4092 int hlen = 0;
4093 int remain = count;
4094 struct sk_buff *skb;
4095 struct bt_skb_cb *scb;
4096
4097 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4098 index >= NUM_REASSEMBLY)
33e882a5
SS
4099 return -EILSEQ;
4100
4101 skb = hdev->reassembly[index];
4102
4103 if (!skb) {
4104 switch (type) {
4105 case HCI_ACLDATA_PKT:
4106 len = HCI_MAX_FRAME_SIZE;
4107 hlen = HCI_ACL_HDR_SIZE;
4108 break;
4109 case HCI_EVENT_PKT:
4110 len = HCI_MAX_EVENT_SIZE;
4111 hlen = HCI_EVENT_HDR_SIZE;
4112 break;
4113 case HCI_SCODATA_PKT:
4114 len = HCI_MAX_SCO_SIZE;
4115 hlen = HCI_SCO_HDR_SIZE;
4116 break;
4117 }
4118
1e429f38 4119 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4120 if (!skb)
4121 return -ENOMEM;
4122
4123 scb = (void *) skb->cb;
4124 scb->expect = hlen;
4125 scb->pkt_type = type;
4126
33e882a5
SS
4127 hdev->reassembly[index] = skb;
4128 }
4129
4130 while (count) {
4131 scb = (void *) skb->cb;
89bb46d0 4132 len = min_t(uint, scb->expect, count);
33e882a5
SS
4133
4134 memcpy(skb_put(skb, len), data, len);
4135
4136 count -= len;
4137 data += len;
4138 scb->expect -= len;
4139 remain = count;
4140
4141 switch (type) {
4142 case HCI_EVENT_PKT:
4143 if (skb->len == HCI_EVENT_HDR_SIZE) {
4144 struct hci_event_hdr *h = hci_event_hdr(skb);
4145 scb->expect = h->plen;
4146
4147 if (skb_tailroom(skb) < scb->expect) {
4148 kfree_skb(skb);
4149 hdev->reassembly[index] = NULL;
4150 return -ENOMEM;
4151 }
4152 }
4153 break;
4154
4155 case HCI_ACLDATA_PKT:
4156 if (skb->len == HCI_ACL_HDR_SIZE) {
4157 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4158 scb->expect = __le16_to_cpu(h->dlen);
4159
4160 if (skb_tailroom(skb) < scb->expect) {
4161 kfree_skb(skb);
4162 hdev->reassembly[index] = NULL;
4163 return -ENOMEM;
4164 }
4165 }
4166 break;
4167
4168 case HCI_SCODATA_PKT:
4169 if (skb->len == HCI_SCO_HDR_SIZE) {
4170 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4171 scb->expect = h->dlen;
4172
4173 if (skb_tailroom(skb) < scb->expect) {
4174 kfree_skb(skb);
4175 hdev->reassembly[index] = NULL;
4176 return -ENOMEM;
4177 }
4178 }
4179 break;
4180 }
4181
4182 if (scb->expect == 0) {
4183 /* Complete frame */
4184
4185 bt_cb(skb)->pkt_type = type;
e1a26170 4186 hci_recv_frame(hdev, skb);
33e882a5
SS
4187
4188 hdev->reassembly[index] = NULL;
4189 return remain;
4190 }
4191 }
4192
4193 return remain;
4194}
4195
ef222013
MH
4196int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4197{
f39a3c06
SS
4198 int rem = 0;
4199
ef222013
MH
4200 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4201 return -EILSEQ;
4202
da5f6c37 4203 while (count) {
1e429f38 4204 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4205 if (rem < 0)
4206 return rem;
ef222013 4207
f39a3c06
SS
4208 data += (count - rem);
4209 count = rem;
f81c6224 4210 }
ef222013 4211
f39a3c06 4212 return rem;
ef222013
MH
4213}
4214EXPORT_SYMBOL(hci_recv_fragment);
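/* Editorial sketch, not part of the original file: a driver that already
 * knows the packet type (e.g. separate USB endpoints per type) can feed
 * partial buffers and let hci_reassembly() above stitch packets together.
 */
static int example_rx_acl_chunk(struct hci_dev *hdev, void *data, int count)
{
	/* May be called repeatedly with arbitrary chunk sizes; a
	 * negative return means the stream is unrecoverable.
	 */
	return hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
}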
4215
99811510
SS
4216#define STREAM_REASSEMBLY 0
4217
4218int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4219{
4220 int type;
4221 int rem = 0;
4222
da5f6c37 4223 while (count) {
99811510
SS
4224 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4225
4226 if (!skb) {
4227 struct { char type; } *pkt;
4228
4229 /* Start of the frame */
4230 pkt = data;
4231 type = pkt->type;
4232
4233 data++;
4234 count--;
4235 } else
4236 type = bt_cb(skb)->pkt_type;
4237
1e429f38 4238 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4239 STREAM_REASSEMBLY);
99811510
SS
4240 if (rem < 0)
4241 return rem;
4242
4243 data += (count - rem);
4244 count = rem;
f81c6224 4245 }
99811510
SS
4246
4247 return rem;
4248}
4249EXPORT_SYMBOL(hci_recv_stream_fragment);
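/* Editorial sketch, not part of the original file: a UART-style (H:4)
 * driver feeds the raw byte stream, where the first byte of each packet
 * is the packet type consumed by the loop above.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s corrupted HCI byte stream", hdev->name);
}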
4250
1da177e4
LT
4251/* ---- Interface to upper protocols ---- */
4252
1da177e4
LT
4253int hci_register_cb(struct hci_cb *cb)
4254{
4255 BT_DBG("%p name %s", cb, cb->name);
4256
f20d09d5 4257 write_lock(&hci_cb_list_lock);
1da177e4 4258 list_add(&cb->list, &hci_cb_list);
f20d09d5 4259 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4260
4261 return 0;
4262}
4263EXPORT_SYMBOL(hci_register_cb);
4264
4265int hci_unregister_cb(struct hci_cb *cb)
4266{
4267 BT_DBG("%p name %s", cb, cb->name);
4268
f20d09d5 4269 write_lock(&hci_cb_list_lock);
1da177e4 4270 list_del(&cb->list);
f20d09d5 4271 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4272
4273 return 0;
4274}
4275EXPORT_SYMBOL(hci_unregister_cb);
4276
51086991 4277static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4278{
0d48d939 4279 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4280
cd82e61c
MH
4281 /* Time stamp */
4282 __net_timestamp(skb);
1da177e4 4283
cd82e61c
MH
4284 /* Send copy to monitor */
4285 hci_send_to_monitor(hdev, skb);
4286
4287 if (atomic_read(&hdev->promisc)) {
4288 /* Send copy to the sockets */
470fe1b5 4289 hci_send_to_sock(hdev, skb);
1da177e4
LT
4290 }
4291
4292 /* Get rid of the skb owner prior to sending to the driver. */
4293 skb_orphan(skb);
4294
7bd8f09f 4295 if (hdev->send(hdev, skb) < 0)
51086991 4296 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4297}
4298
3119ae95
JH
4299void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4300{
4301 skb_queue_head_init(&req->cmd_q);
4302 req->hdev = hdev;
5d73e034 4303 req->err = 0;
3119ae95
JH
4304}
4305
4306int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4307{
4308 struct hci_dev *hdev = req->hdev;
4309 struct sk_buff *skb;
4310 unsigned long flags;
4311
4312 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4313
5d73e034
AG
4314 /* If an error occurred during request building, remove all HCI
4315 * commands queued on the HCI request queue.
4316 */
4317 if (req->err) {
4318 skb_queue_purge(&req->cmd_q);
4319 return req->err;
4320 }
4321
3119ae95
JH
4322 /* Do not allow empty requests */
4323 if (skb_queue_empty(&req->cmd_q))
382b0c39 4324 return -ENODATA;
3119ae95
JH
4325
4326 skb = skb_peek_tail(&req->cmd_q);
4327 bt_cb(skb)->req.complete = complete;
4328
4329 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4330 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4331 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4332
4333 queue_work(hdev->workqueue, &hdev->cmd_work);
4334
4335 return 0;
4336}
4337
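/* Editorial sketch, not part of the original file: typical use of the
 * request API above, queueing a command and running the request with a
 * completion callback. example_complete is a hypothetical
 * hci_req_complete_t callback.
 */
static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* More hci_req_add() calls may follow; only the final command
	 * in the request triggers the completion callback.
	 */
	return hci_req_run(&req, example_complete);
}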
1ca3a9d0 4338static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4339 u32 plen, const void *param)
1da177e4
LT
4340{
4341 int len = HCI_COMMAND_HDR_SIZE + plen;
4342 struct hci_command_hdr *hdr;
4343 struct sk_buff *skb;
4344
1da177e4 4345 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4346 if (!skb)
4347 return NULL;
1da177e4
LT
4348
4349 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4350 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4351 hdr->plen = plen;
4352
4353 if (plen)
4354 memcpy(skb_put(skb, plen), param, plen);
4355
4356 BT_DBG("skb len %d", skb->len);
4357
0d48d939 4358 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4359
1ca3a9d0
JH
4360 return skb;
4361}
4362
4363/* Send HCI command */
07dc93dd
JH
4364int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4365 const void *param)
1ca3a9d0
JH
4366{
4367 struct sk_buff *skb;
4368
4369 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4370
4371 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4372 if (!skb) {
4373 BT_ERR("%s no memory for command", hdev->name);
4374 return -ENOMEM;
4375 }
4376
11714b3d
JH
4377 /* Stand-alone HCI commands must be flagged as
4378 * single-command requests.
4379 */
4380 bt_cb(skb)->req.start = true;
4381
1da177e4 4382 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4383 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4384
4385 return 0;
4386}
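/* Editorial sketch, not part of the original file: a stand-alone command
 * sent outside any request context, here enabling page and inquiry scan
 * (assuming the usual hci.h definitions of the opcode and scan bits).
 */
static int example_enable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}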
1da177e4 4387
71c76a17 4388/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4389void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4390 const void *param, u8 event)
71c76a17
JH
4391{
4392 struct hci_dev *hdev = req->hdev;
4393 struct sk_buff *skb;
4394
4395 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4396
34739c1e
AG
4397 /* If an error occurred during request building, there is no point in
4398 * queueing the HCI command. We can simply return.
4399 */
4400 if (req->err)
4401 return;
4402
71c76a17
JH
4403 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4404 if (!skb) {
5d73e034
AG
4405 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4406 hdev->name, opcode);
4407 req->err = -ENOMEM;
e348fe6b 4408 return;
71c76a17
JH
4409 }
4410
4411 if (skb_queue_empty(&req->cmd_q))
4412 bt_cb(skb)->req.start = true;
4413
02350a72
JH
4414 bt_cb(skb)->req.event = event;
4415
71c76a17 4416 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4417}
4418
07dc93dd
JH
4419void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4420 const void *param)
02350a72
JH
4421{
4422 hci_req_add_ev(req, opcode, plen, param, 0);
4423}
4424
1da177e4 4425/* Get data from the previously sent command */
a9de9248 4426void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4427{
4428 struct hci_command_hdr *hdr;
4429
4430 if (!hdev->sent_cmd)
4431 return NULL;
4432
4433 hdr = (void *) hdev->sent_cmd->data;
4434
a9de9248 4435 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4436 return NULL;
4437
f0e09510 4438 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4439
4440 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4441}
4442
4443/* Send ACL data */
4444static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4445{
4446 struct hci_acl_hdr *hdr;
4447 int len = skb->len;
4448
badff6d0
ACM
4449 skb_push(skb, HCI_ACL_HDR_SIZE);
4450 skb_reset_transport_header(skb);
9c70220b 4451 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4452 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4453 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4454}
4455
ee22be7e 4456static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4457 struct sk_buff *skb, __u16 flags)
1da177e4 4458{
ee22be7e 4459 struct hci_conn *conn = chan->conn;
1da177e4
LT
4460 struct hci_dev *hdev = conn->hdev;
4461 struct sk_buff *list;
4462
087bfd99
GP
4463 skb->len = skb_headlen(skb);
4464 skb->data_len = 0;
4465
4466 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4467
4468 switch (hdev->dev_type) {
4469 case HCI_BREDR:
4470 hci_add_acl_hdr(skb, conn->handle, flags);
4471 break;
4472 case HCI_AMP:
4473 hci_add_acl_hdr(skb, chan->handle, flags);
4474 break;
4475 default:
4476 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4477 return;
4478 }
087bfd99 4479
70f23020
AE
4480 list = skb_shinfo(skb)->frag_list;
4481 if (!list) {
1da177e4
LT
4482 /* Non-fragmented */
4483 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4484
73d80deb 4485 skb_queue_tail(queue, skb);
1da177e4
LT
4486 } else {
4487 /* Fragmented */
4488 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4489
4490 skb_shinfo(skb)->frag_list = NULL;
4491
4492 /* Queue all fragments atomically */
af3e6359 4493 spin_lock(&queue->lock);
1da177e4 4494
73d80deb 4495 __skb_queue_tail(queue, skb);
e702112f
AE
4496
4497 flags &= ~ACL_START;
4498 flags |= ACL_CONT;
1da177e4
LT
4499 do {
4500 skb = list; list = list->next;
8e87d142 4501
0d48d939 4502 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4503 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4504
4505 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4506
73d80deb 4507 __skb_queue_tail(queue, skb);
1da177e4
LT
4508 } while (list);
4509
af3e6359 4510 spin_unlock(&queue->lock);
1da177e4 4511 }
73d80deb
LAD
4512}
4513
4514void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4515{
ee22be7e 4516 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4517
f0e09510 4518 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4519
ee22be7e 4520 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4521
3eff45ea 4522 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4523}
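/* Editorial sketch, not part of the original file: an upper layer such
 * as L2CAP submits one skb per PDU; fragments beyond the controller MTU
 * arrive chained on skb frag_list, and hci_queue_acl() above rewrites
 * their flags from ACL_START to ACL_CONT automatically.
 */
static void example_send_pdu(struct hci_chan *chan, struct sk_buff *skb)
{
	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
}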
1da177e4
LT
4524
4525/* Send SCO data */
0d861d8b 4526void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4527{
4528 struct hci_dev *hdev = conn->hdev;
4529 struct hci_sco_hdr hdr;
4530
4531 BT_DBG("%s len %d", hdev->name, skb->len);
4532
aca3192c 4533 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4534 hdr.dlen = skb->len;
4535
badff6d0
ACM
4536 skb_push(skb, HCI_SCO_HDR_SIZE);
4537 skb_reset_transport_header(skb);
9c70220b 4538 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4539
0d48d939 4540 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4541
1da177e4 4542 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4543 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4544}
1da177e4
LT
4545
4546/* ---- HCI TX task (outgoing data) ---- */
4547
4548/* HCI Connection scheduler */
6039aa73
GP
4549static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4550 int *quote)
1da177e4
LT
4551{
4552 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4553 struct hci_conn *conn = NULL, *c;
abc5de8f 4554 unsigned int num = 0, min = ~0;
1da177e4 4555
8e87d142 4556 /* We don't have to lock the device here. Connections are always
1da177e4 4557 * added and removed with the TX task disabled. */
bf4c6325
GP
4558
4559 rcu_read_lock();
4560
4561 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4562 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4563 continue;
769be974
MH
4564
4565 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4566 continue;
4567
1da177e4
LT
4568 num++;
4569
4570 if (c->sent < min) {
4571 min = c->sent;
4572 conn = c;
4573 }
52087a79
LAD
4574
4575 if (hci_conn_num(hdev, type) == num)
4576 break;
1da177e4
LT
4577 }
4578
bf4c6325
GP
4579 rcu_read_unlock();
4580
1da177e4 4581 if (conn) {
6ed58ec5
VT
4582 int cnt, q;
4583
4584 switch (conn->type) {
4585 case ACL_LINK:
4586 cnt = hdev->acl_cnt;
4587 break;
4588 case SCO_LINK:
4589 case ESCO_LINK:
4590 cnt = hdev->sco_cnt;
4591 break;
4592 case LE_LINK:
4593 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4594 break;
4595 default:
4596 cnt = 0;
4597 BT_ERR("Unknown link type");
4598 }
4599
4600 q = cnt / num;
1da177e4
LT
4601 *quote = q ? q : 1;
4602 } else
4603 *quote = 0;
4604
4605 BT_DBG("conn %p quote %d", conn, *quote);
4606 return conn;
4607}
4608
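/* Editorial note, not part of the original file: a worked example of the
 * quota computed above. With hdev->acl_cnt == 9 free ACL buffers and
 * num == 4 connections holding queued data, the least-used connection
 * gets *quote = 9 / 4 == 2 packets this round; when cnt < num the quota
 * is clamped to 1 so the scheduler always makes progress.
 */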
6039aa73 4609static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4610{
4611 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4612 struct hci_conn *c;
1da177e4 4613
bae1f5d9 4614 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4615
bf4c6325
GP
4616 rcu_read_lock();
4617
1da177e4 4618 /* Kill stalled connections */
bf4c6325 4619 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4620 if (c->type == type && c->sent) {
6ed93dc6
AE
4621 BT_ERR("%s killing stalled connection %pMR",
4622 hdev->name, &c->dst);
bed71748 4623 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4624 }
4625 }
bf4c6325
GP
4626
4627 rcu_read_unlock();
1da177e4
LT
4628}
4629
6039aa73
GP
4630static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4631 int *quote)
1da177e4 4632{
73d80deb
LAD
4633 struct hci_conn_hash *h = &hdev->conn_hash;
4634 struct hci_chan *chan = NULL;
abc5de8f 4635 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4636 struct hci_conn *conn;
73d80deb
LAD
4637 int cnt, q, conn_num = 0;
4638
4639 BT_DBG("%s", hdev->name);
4640
bf4c6325
GP
4641 rcu_read_lock();
4642
4643 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4644 struct hci_chan *tmp;
4645
4646 if (conn->type != type)
4647 continue;
4648
4649 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4650 continue;
4651
4652 conn_num++;
4653
8192edef 4654 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4655 struct sk_buff *skb;
4656
4657 if (skb_queue_empty(&tmp->data_q))
4658 continue;
4659
4660 skb = skb_peek(&tmp->data_q);
4661 if (skb->priority < cur_prio)
4662 continue;
4663
4664 if (skb->priority > cur_prio) {
4665 num = 0;
4666 min = ~0;
4667 cur_prio = skb->priority;
4668 }
4669
4670 num++;
4671
4672 if (conn->sent < min) {
4673 min = conn->sent;
4674 chan = tmp;
4675 }
4676 }
4677
4678 if (hci_conn_num(hdev, type) == conn_num)
4679 break;
4680 }
4681
bf4c6325
GP
4682 rcu_read_unlock();
4683
73d80deb
LAD
4684 if (!chan)
4685 return NULL;
4686
4687 switch (chan->conn->type) {
4688 case ACL_LINK:
4689 cnt = hdev->acl_cnt;
4690 break;
bd1eb66b
AE
4691 case AMP_LINK:
4692 cnt = hdev->block_cnt;
4693 break;
73d80deb
LAD
4694 case SCO_LINK:
4695 case ESCO_LINK:
4696 cnt = hdev->sco_cnt;
4697 break;
4698 case LE_LINK:
4699 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4700 break;
4701 default:
4702 cnt = 0;
4703 BT_ERR("Unknown link type");
4704 }
4705
4706 q = cnt / num;
4707 *quote = q ? q : 1;
4708 BT_DBG("chan %p quote %d", chan, *quote);
4709 return chan;
4710}
4711
02b20f0b
LAD
4712static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4713{
4714 struct hci_conn_hash *h = &hdev->conn_hash;
4715 struct hci_conn *conn;
4716 int num = 0;
4717
4718 BT_DBG("%s", hdev->name);
4719
bf4c6325
GP
4720 rcu_read_lock();
4721
4722 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4723 struct hci_chan *chan;
4724
4725 if (conn->type != type)
4726 continue;
4727
4728 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4729 continue;
4730
4731 num++;
4732
8192edef 4733 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4734 struct sk_buff *skb;
4735
4736 if (chan->sent) {
4737 chan->sent = 0;
4738 continue;
4739 }
4740
4741 if (skb_queue_empty(&chan->data_q))
4742 continue;
4743
4744 skb = skb_peek(&chan->data_q);
4745 if (skb->priority >= HCI_PRIO_MAX - 1)
4746 continue;
4747
4748 skb->priority = HCI_PRIO_MAX - 1;
4749
4750 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4751 skb->priority);
02b20f0b
LAD
4752 }
4753
4754 if (hci_conn_num(hdev, type) == num)
4755 break;
4756 }
bf4c6325
GP
4757
4758 rcu_read_unlock();
4759
02b20f0b
LAD
4760}
4761
b71d385a
AE
4762static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4763{
4764 /* Calculate count of blocks used by this packet */
4765 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4766}
4767
6039aa73 4768static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4769{
fee746b0 4770 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
1da177e4
LT
4771 /* ACL tx timeout must be longer than maximum
4772 * link supervision timeout (40.9 seconds) */
63d2bc1b 4773 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4774 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4775 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4776 }
63d2bc1b 4777}
1da177e4 4778
6039aa73 4779static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4780{
4781 unsigned int cnt = hdev->acl_cnt;
4782 struct hci_chan *chan;
4783 struct sk_buff *skb;
4784 int quote;
4785
4786 __check_timeout(hdev, cnt);
04837f64 4787
73d80deb 4788 while (hdev->acl_cnt &&
a8c5fb1a 4789 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4790 u32 priority = (skb_peek(&chan->data_q))->priority;
4791 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4792 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4793 skb->len, skb->priority);
73d80deb 4794
ec1cce24
LAD
4795 /* Stop if priority has changed */
4796 if (skb->priority < priority)
4797 break;
4798
4799 skb = skb_dequeue(&chan->data_q);
4800
73d80deb 4801 hci_conn_enter_active_mode(chan->conn,
04124681 4802 bt_cb(skb)->force_active);
04837f64 4803
57d17d70 4804 hci_send_frame(hdev, skb);
1da177e4
LT
4805 hdev->acl_last_tx = jiffies;
4806
4807 hdev->acl_cnt--;
73d80deb
LAD
4808 chan->sent++;
4809 chan->conn->sent++;
1da177e4
LT
4810 }
4811 }
02b20f0b
LAD
4812
4813 if (cnt != hdev->acl_cnt)
4814 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4815}
4816
6039aa73 4817static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4818{
63d2bc1b 4819 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4820 struct hci_chan *chan;
4821 struct sk_buff *skb;
4822 int quote;
bd1eb66b 4823 u8 type;
b71d385a 4824
63d2bc1b 4825 __check_timeout(hdev, cnt);
b71d385a 4826
bd1eb66b
AE
4827 BT_DBG("%s", hdev->name);
4828
4829 if (hdev->dev_type == HCI_AMP)
4830 type = AMP_LINK;
4831 else
4832 type = ACL_LINK;
4833
b71d385a 4834 while (hdev->block_cnt > 0 &&
bd1eb66b 4835 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4836 u32 priority = (skb_peek(&chan->data_q))->priority;
4837 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4838 int blocks;
4839
4840 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4841 skb->len, skb->priority);
b71d385a
AE
4842
4843 /* Stop if priority has changed */
4844 if (skb->priority < priority)
4845 break;
4846
4847 skb = skb_dequeue(&chan->data_q);
4848
4849 blocks = __get_blocks(hdev, skb);
4850 if (blocks > hdev->block_cnt)
4851 return;
4852
4853 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4854 bt_cb(skb)->force_active);
b71d385a 4855
57d17d70 4856 hci_send_frame(hdev, skb);
b71d385a
AE
4857 hdev->acl_last_tx = jiffies;
4858
4859 hdev->block_cnt -= blocks;
4860 quote -= blocks;
4861
4862 chan->sent += blocks;
4863 chan->conn->sent += blocks;
4864 }
4865 }
4866
4867 if (cnt != hdev->block_cnt)
bd1eb66b 4868 hci_prio_recalculate(hdev, type);
b71d385a
AE
4869}
4870
6039aa73 4871static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4872{
4873 BT_DBG("%s", hdev->name);
4874
bd1eb66b
AE
4875 /* No ACL link over BR/EDR controller */
4876 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4877 return;
4878
4879 /* No AMP link over AMP controller */
4880 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4881 return;
4882
4883 switch (hdev->flow_ctl_mode) {
4884 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4885 hci_sched_acl_pkt(hdev);
4886 break;
4887
4888 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4889 hci_sched_acl_blk(hdev);
4890 break;
4891 }
4892}
4893
1da177e4 4894/* Schedule SCO */
6039aa73 4895static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4896{
4897 struct hci_conn *conn;
4898 struct sk_buff *skb;
4899 int quote;
4900
4901 BT_DBG("%s", hdev->name);
4902
52087a79
LAD
4903 if (!hci_conn_num(hdev, SCO_LINK))
4904 return;
4905
1da177e4
LT
4906 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4907 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4908 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4909 hci_send_frame(hdev, skb);
1da177e4
LT
4910
4911 conn->sent++;
4912 if (conn->sent == ~0)
4913 conn->sent = 0;
4914 }
4915 }
4916}
4917
6039aa73 4918static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4919{
4920 struct hci_conn *conn;
4921 struct sk_buff *skb;
4922 int quote;
4923
4924 BT_DBG("%s", hdev->name);
4925
52087a79
LAD
4926 if (!hci_conn_num(hdev, ESCO_LINK))
4927 return;
4928
8fc9ced3
GP
4929 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4930 &quote))) {
b6a0dc82
MH
4931 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4932 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4933 hci_send_frame(hdev, skb);
b6a0dc82
MH
4934
4935 conn->sent++;
4936 if (conn->sent == ~0)
4937 conn->sent = 0;
4938 }
4939 }
4940}
4941
6039aa73 4942static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4943{
73d80deb 4944 struct hci_chan *chan;
6ed58ec5 4945 struct sk_buff *skb;
02b20f0b 4946 int quote, cnt, tmp;
6ed58ec5
VT
4947
4948 BT_DBG("%s", hdev->name);
4949
52087a79
LAD
4950 if (!hci_conn_num(hdev, LE_LINK))
4951 return;
4952
fee746b0 4953 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
6ed58ec5
VT
4954 /* The LE tx timeout must be longer than the maximum
4955 * link supervision timeout (40.9 seconds). */
bae1f5d9 4956 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4957 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4958 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4959 }
4960
4961 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4962 tmp = cnt;
73d80deb 4963 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4964 u32 priority = (skb_peek(&chan->data_q))->priority;
4965 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4966 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4967 skb->len, skb->priority);
6ed58ec5 4968
ec1cce24
LAD
4969 /* Stop if priority has changed */
4970 if (skb->priority < priority)
4971 break;
4972
4973 skb = skb_dequeue(&chan->data_q);
4974
57d17d70 4975 hci_send_frame(hdev, skb);
6ed58ec5
VT
4976 hdev->le_last_tx = jiffies;
4977
4978 cnt--;
73d80deb
LAD
4979 chan->sent++;
4980 chan->conn->sent++;
6ed58ec5
VT
4981 }
4982 }
73d80deb 4983
6ed58ec5
VT
4984 if (hdev->le_pkts)
4985 hdev->le_cnt = cnt;
4986 else
4987 hdev->acl_cnt = cnt;
02b20f0b
LAD
4988
4989 if (cnt != tmp)
4990 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4991}
4992
3eff45ea 4993static void hci_tx_work(struct work_struct *work)
1da177e4 4994{
3eff45ea 4995 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4996 struct sk_buff *skb;
4997
6ed58ec5 4998 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4999 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5000
52de599e
MH
5001 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5002 /* Schedule queues and send stuff to HCI driver */
5003 hci_sched_acl(hdev);
5004 hci_sched_sco(hdev);
5005 hci_sched_esco(hdev);
5006 hci_sched_le(hdev);
5007 }
6ed58ec5 5008
1da177e4
LT
5009 /* Send next queued raw (unknown type) packet */
5010 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5011 hci_send_frame(hdev, skb);
1da177e4
LT
5012}
5013
25985edc 5014/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5015
5016/* ACL data packet */
6039aa73 5017static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5018{
5019 struct hci_acl_hdr *hdr = (void *) skb->data;
5020 struct hci_conn *conn;
5021 __u16 handle, flags;
5022
5023 skb_pull(skb, HCI_ACL_HDR_SIZE);
5024
5025 handle = __le16_to_cpu(hdr->handle);
5026 flags = hci_flags(handle);
5027 handle = hci_handle(handle);
5028
f0e09510 5029 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5030 handle, flags);
1da177e4
LT
5031
5032 hdev->stat.acl_rx++;
5033
5034 hci_dev_lock(hdev);
5035 conn = hci_conn_hash_lookup_handle(hdev, handle);
5036 hci_dev_unlock(hdev);
8e87d142 5037
1da177e4 5038 if (conn) {
65983fc7 5039 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5040
1da177e4 5041 /* Send to upper protocol */
686ebf28
UF
5042 l2cap_recv_acldata(conn, skb, flags);
5043 return;
1da177e4 5044 } else {
8e87d142 5045 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5046 hdev->name, handle);
1da177e4
LT
5047 }
5048
5049 kfree_skb(skb);
5050}
5051
5052/* SCO data packet */
6039aa73 5053static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5054{
5055 struct hci_sco_hdr *hdr = (void *) skb->data;
5056 struct hci_conn *conn;
5057 __u16 handle;
5058
5059 skb_pull(skb, HCI_SCO_HDR_SIZE);
5060
5061 handle = __le16_to_cpu(hdr->handle);
5062
f0e09510 5063 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5064
5065 hdev->stat.sco_rx++;
5066
5067 hci_dev_lock(hdev);
5068 conn = hci_conn_hash_lookup_handle(hdev, handle);
5069 hci_dev_unlock(hdev);
5070
5071 if (conn) {
1da177e4 5072 /* Send to upper protocol */
686ebf28
UF
5073 sco_recv_scodata(conn, skb);
5074 return;
1da177e4 5075 } else {
8e87d142 5076 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5077 hdev->name, handle);
1da177e4
LT
5078 }
5079
5080 kfree_skb(skb);
5081}
5082
9238f36a
JH
5083static bool hci_req_is_complete(struct hci_dev *hdev)
5084{
5085 struct sk_buff *skb;
5086
5087 skb = skb_peek(&hdev->cmd_q);
5088 if (!skb)
5089 return true;
5090
5091 return bt_cb(skb)->req.start;
5092}
5093
42c6b129
JH
5094static void hci_resend_last(struct hci_dev *hdev)
5095{
5096 struct hci_command_hdr *sent;
5097 struct sk_buff *skb;
5098 u16 opcode;
5099
5100 if (!hdev->sent_cmd)
5101 return;
5102
5103 sent = (void *) hdev->sent_cmd->data;
5104 opcode = __le16_to_cpu(sent->opcode);
5105 if (opcode == HCI_OP_RESET)
5106 return;
5107
5108 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5109 if (!skb)
5110 return;
5111
5112 skb_queue_head(&hdev->cmd_q, skb);
5113 queue_work(hdev->workqueue, &hdev->cmd_work);
5114}
5115
9238f36a
JH
5116void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5117{
5118 hci_req_complete_t req_complete = NULL;
5119 struct sk_buff *skb;
5120 unsigned long flags;
5121
5122 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5123
42c6b129
JH
5124 /* If the completed command doesn't match the last one that was
5125 * sent, we need to do special handling of it.
9238f36a 5126 */
42c6b129
JH
5127 if (!hci_sent_cmd_data(hdev, opcode)) {
5128 /* Some CSR-based controllers generate a spontaneous
5129 * reset complete event during init and any pending
5130 * command will never be completed. In such a case we
5131 * need to resend whatever was the last sent
5132 * command.
5133 */
5134 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5135 hci_resend_last(hdev);
5136
9238f36a 5137 return;
42c6b129 5138 }
9238f36a
JH
5139
5140 /* If the command succeeded and there's still more commands in
5141 * this request the request is not yet complete.
5142 */
5143 if (!status && !hci_req_is_complete(hdev))
5144 return;
5145
5146 /* If this was the last command in a request the complete
5147 * callback would be found in hdev->sent_cmd instead of the
5148 * command queue (hdev->cmd_q).
5149 */
5150 if (hdev->sent_cmd) {
5151 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5152
5153 if (req_complete) {
5154 /* We must set the complete callback to NULL to
5155 * avoid calling the callback more than once if
5156 * this function gets called again.
5157 */
5158 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5159
9238f36a 5160 goto call_complete;
53e21fbc 5161 }
9238f36a
JH
5162 }
5163
5164 /* Remove all pending commands belonging to this request */
5165 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5166 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5167 if (bt_cb(skb)->req.start) {
5168 __skb_queue_head(&hdev->cmd_q, skb);
5169 break;
5170 }
5171
5172 req_complete = bt_cb(skb)->req.complete;
5173 kfree_skb(skb);
5174 }
5175 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5176
5177call_complete:
5178 if (req_complete)
5179 req_complete(hdev, status);
5180}
5181
b78752cc 5182static void hci_rx_work(struct work_struct *work)
1da177e4 5183{
b78752cc 5184 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5185 struct sk_buff *skb;
5186
5187 BT_DBG("%s", hdev->name);
5188
1da177e4 5189 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5190 /* Send copy to monitor */
5191 hci_send_to_monitor(hdev, skb);
5192
1da177e4
LT
5193 if (atomic_read(&hdev->promisc)) {
5194 /* Send copy to the sockets */
470fe1b5 5195 hci_send_to_sock(hdev, skb);
1da177e4
LT
5196 }
5197
fee746b0 5198 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5199 kfree_skb(skb);
5200 continue;
5201 }
5202
5203 if (test_bit(HCI_INIT, &hdev->flags)) {
5204 /* Don't process data packets in this state. */
0d48d939 5205 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5206 case HCI_ACLDATA_PKT:
5207 case HCI_SCODATA_PKT:
5208 kfree_skb(skb);
5209 continue;
3ff50b79 5210 }
1da177e4
LT
5211 }
5212
5213 /* Process frame */
0d48d939 5214 switch (bt_cb(skb)->pkt_type) {
1da177e4 5215 case HCI_EVENT_PKT:
b78752cc 5216 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5217 hci_event_packet(hdev, skb);
5218 break;
5219
5220 case HCI_ACLDATA_PKT:
5221 BT_DBG("%s ACL data packet", hdev->name);
5222 hci_acldata_packet(hdev, skb);
5223 break;
5224
5225 case HCI_SCODATA_PKT:
5226 BT_DBG("%s SCO data packet", hdev->name);
5227 hci_scodata_packet(hdev, skb);
5228 break;
5229
5230 default:
5231 kfree_skb(skb);
5232 break;
5233 }
5234 }
1da177e4
LT
5235}
5236
c347b765 5237static void hci_cmd_work(struct work_struct *work)
1da177e4 5238{
c347b765 5239 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5240 struct sk_buff *skb;
5241
2104786b
AE
5242 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5243 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5244
1da177e4 5245 /* Send queued commands */
5a08ecce
AE
5246 if (atomic_read(&hdev->cmd_cnt)) {
5247 skb = skb_dequeue(&hdev->cmd_q);
5248 if (!skb)
5249 return;
5250
7585b97a 5251 kfree_skb(hdev->sent_cmd);
1da177e4 5252
a675d7f1 5253 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5254 if (hdev->sent_cmd) {
1da177e4 5255 atomic_dec(&hdev->cmd_cnt);
57d17d70 5256 hci_send_frame(hdev, skb);
7bdb8a5c 5257 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5258 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5259 else
65cc2b49
MH
5260 schedule_delayed_work(&hdev->cmd_timer,
5261 HCI_CMD_TIMEOUT);
1da177e4
LT
5262 } else {
5263 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5264 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5265 }
5266 }
5267}
b1efcc28
AG
5268
5269void hci_req_add_le_scan_disable(struct hci_request *req)
5270{
5271 struct hci_cp_le_set_scan_enable cp;
5272
5273 memset(&cp, 0, sizeof(cp));
5274 cp.enable = LE_SCAN_DISABLE;
5275 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5276}
a4790dbd 5277
8ef30fd3
AG
5278void hci_req_add_le_passive_scan(struct hci_request *req)
5279{
5280 struct hci_cp_le_set_scan_param param_cp;
5281 struct hci_cp_le_set_scan_enable enable_cp;
5282 struct hci_dev *hdev = req->hdev;
5283 u8 own_addr_type;
5284
6ab535a7
MH
5285 /* Set require_privacy to false since no SCAN_REQ is sent
5286 * during passive scanning. Not using an unresolvable address
5287 * here is important so that peer devices using direct
5288 * advertising with our address will be correctly reported
5289 * by the controller.
8ef30fd3 5290 */
6ab535a7 5291 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5292 return;
5293
5294 memset(&param_cp, 0, sizeof(param_cp));
5295 param_cp.type = LE_SCAN_PASSIVE;
5296 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5297 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5298 param_cp.own_address_type = own_addr_type;
5299 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5300 &param_cp);
5301
5302 memset(&enable_cp, 0, sizeof(enable_cp));
5303 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5304 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5305 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5306 &enable_cp);
5307}
5308
a4790dbd
AG
5309static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5310{
5311 if (status)
5312 BT_DBG("HCI request failed to update background scanning: "
5313 "status 0x%2.2x", status);
5314}
5315
5316 /* This function controls the background scanning based on hdev->pend_le_conns
5317 * list. If there are pending LE connections, we start the background scanning,
5318 * otherwise we stop it.
5319 *
5320 * This function requires the caller holds hdev->lock.
5321 */
5322void hci_update_background_scan(struct hci_dev *hdev)
5323{
a4790dbd
AG
5324 struct hci_request req;
5325 struct hci_conn *conn;
5326 int err;
5327
c20c02d5
MH
5328 if (!test_bit(HCI_UP, &hdev->flags) ||
5329 test_bit(HCI_INIT, &hdev->flags) ||
5330 test_bit(HCI_SETUP, &hdev->dev_flags) ||
b8221770 5331 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5332 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5333 return;
5334
a4790dbd
AG
5335 hci_req_init(&req, hdev);
5336
5337 if (list_empty(&hdev->pend_le_conns)) {
5338 /* If there are no pending LE connections, we should stop
5339 * the background scanning.
5340 */
5341
5342 /* If controller is not scanning we are done. */
5343 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5344 return;
5345
5346 hci_req_add_le_scan_disable(&req);
5347
5348 BT_DBG("%s stopping background scanning", hdev->name);
5349 } else {
a4790dbd
AG
5350 /* If there is at least one pending LE connection, we should
5351 * keep the background scan running.
5352 */
5353
a4790dbd
AG
5354 /* If controller is connecting, we should not start scanning
5355 * since some controllers are not able to scan and connect at
5356 * the same time.
5357 */
5358 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5359 if (conn)
5360 return;
5361
4340a124
AG
5362 /* If controller is currently scanning, we stop it to ensure we
5363 * don't miss any advertising (due to duplicates filter).
5364 */
5365 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5366 hci_req_add_le_scan_disable(&req);
5367
8ef30fd3 5368 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5369
5370 BT_DBG("%s starting background scanning", hdev->name);
5371 }
5372
5373 err = hci_req_run(&req, update_background_scan_complete);
5374 if (err)
5375 BT_ERR("Failed to run HCI request: err %d", err);
5376}
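/* Editorial sketch, not part of the original file: per the comment on
 * hci_update_background_scan() above, the caller must hold hdev->lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */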