/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};

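/* Note: this and the other boolean debugfs attributes below follow a
 * common pattern: reads return "Y\n" or "N\n" and writes accept any
 * strtobool() input ("Y", "N", "1", "0", ...). Assuming debugfs is
 * mounted in the usual place, the hci0 instance of this attribute
 * would be toggled with e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */
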
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

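/* Note: the "%*phN" printk specifier used above consumes the field width
 * argument as a byte count and prints that many bytes as one contiguous
 * hex string, so each link key line emits the HCI_LINK_KEY_SIZE bytes of
 * key->val with no separators.
 */
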
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

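/* For reference: the LE connection interval values above are expressed
 * in units of 1.25 ms, so the accepted range 0x0006-0x0c80 corresponds
 * to 7.5 ms through 4 s (values per the Bluetooth Core Specification).
 */
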
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

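/* Advertising interval values use units of 0.625 ms, so the accepted
 * range 0x0020-0x4000 above spans 20 ms through 10.24 s (values per
 * the Bluetooth Core Specification).
 */
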
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open = device_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

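/* Example (sketch): issuing a synchronous HCI Reset and checking the
 * returned status byte, in the same way dut_mode_write() above does:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
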
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

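/* The value returned above is the Write Inquiry Mode parameter: 0x00 for
 * standard inquiry results, 0x01 for inquiry results with RSSI, and 0x02
 * for inquiry results with RSSI or extended inquiry results. The extra
 * manufacturer/revision checks appear to cover specific controllers that
 * handle RSSI inquiry results without advertising the feature bit.
 */
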
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

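/* For reference: the Set Event Mask parameter is a 64-bit little-endian
 * bitfield in which bit n (i.e. events[n / 8] & (1 << (n % 8))) enables
 * the HCI event with code n + 1. For example, events[4] |= 0x01 sets
 * bit 32 and thus enables event 0x21, Flow Specification Complete.
 */
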
42c6b129 1513static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1514{
42c6b129
JH
1515 struct hci_dev *hdev = req->hdev;
1516
2177bab5 1517 if (lmp_bredr_capable(hdev))
42c6b129 1518 bredr_setup(req);
56f87901
JH
1519 else
1520 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1521
1522 if (lmp_le_capable(hdev))
42c6b129 1523 le_setup(req);
2177bab5 1524
3f8e2d75
JH
1525 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1526 * local supported commands HCI command.
1527 */
1528 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1529 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1530
1531 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1532 /* When SSP is available, then the host features page
1533 * should also be available as well. However some
1534 * controllers list the max_page as 0 as long as SSP
1535 * has not been enabled. To achieve proper debugging
1536 * output, force the minimum max_page to 1 at least.
1537 */
1538 hdev->max_page = 0x01;
1539
2177bab5
JH
1540 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1541 u8 mode = 0x01;
42c6b129
JH
1542 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1543 sizeof(mode), &mode);
2177bab5
JH
1544 } else {
1545 struct hci_cp_write_eir cp;
1546
1547 memset(hdev->eir, 0, sizeof(hdev->eir));
1548 memset(&cp, 0, sizeof(cp));
1549
42c6b129 1550 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1551 }
1552 }
1553
1554 if (lmp_inq_rssi_capable(hdev))
42c6b129 1555 hci_setup_inquiry_mode(req);
2177bab5
JH
1556
1557 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1558 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1559
1560 if (lmp_ext_feat_capable(hdev)) {
1561 struct hci_cp_read_local_ext_features cp;
1562
1563 cp.page = 0x01;
42c6b129
JH
1564 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1565 sizeof(cp), &cp);
2177bab5
JH
1566 }
1567
1568 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1569 u8 enable = 1;
42c6b129
JH
1570 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1571 &enable);
2177bab5
JH
1572 }
1573}
1574
42c6b129 1575static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1576{
42c6b129 1577 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1578 struct hci_cp_write_def_link_policy cp;
1579 u16 link_policy = 0;
1580
1581 if (lmp_rswitch_capable(hdev))
1582 link_policy |= HCI_LP_RSWITCH;
1583 if (lmp_hold_capable(hdev))
1584 link_policy |= HCI_LP_HOLD;
1585 if (lmp_sniff_capable(hdev))
1586 link_policy |= HCI_LP_SNIFF;
1587 if (lmp_park_capable(hdev))
1588 link_policy |= HCI_LP_PARK;
1589
1590 cp.policy = cpu_to_le16(link_policy);
42c6b129 1591 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1592}
1593
42c6b129 1594static void hci_set_le_support(struct hci_request *req)
2177bab5 1595{
42c6b129 1596 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1597 struct hci_cp_write_le_host_supported cp;
1598
c73eee91
JH
1599 /* LE-only devices do not support explicit enablement */
1600 if (!lmp_bredr_capable(hdev))
1601 return;
1602
2177bab5
JH
1603 memset(&cp, 0, sizeof(cp));
1604
1605 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1606 cp.le = 0x01;
32226e4f 1607 cp.simul = 0x00;
2177bab5
JH
1608 }
1609
1610 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1611 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1612 &cp);
2177bab5
JH
1613}
1614
d62e6d67
JH
1615static void hci_set_event_mask_page_2(struct hci_request *req)
1616{
1617 struct hci_dev *hdev = req->hdev;
1618 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1619
1620 /* If Connectionless Slave Broadcast master role is supported
1621 * enable all necessary events for it.
1622 */
53b834d2 1623 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1624 events[1] |= 0x40; /* Triggered Clock Capture */
1625 events[1] |= 0x80; /* Synchronization Train Complete */
1626 events[2] |= 0x10; /* Slave Page Response Timeout */
1627 events[2] |= 0x20; /* CSB Channel Map Change */
1628 }
1629
1630 /* If Connectionless Slave Broadcast slave role is supported
1631 * enable all necessary events for it.
1632 */
53b834d2 1633 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1634 events[2] |= 0x01; /* Synchronization Train Received */
1635 events[2] |= 0x02; /* CSB Receive */
1636 events[2] |= 0x04; /* CSB Timeout */
1637 events[2] |= 0x08; /* Truncated Page Complete */
1638 }
1639
40c59fcb 1640 /* Enable Authenticated Payload Timeout Expired event if supported */
cd7ca0ec 1641 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
40c59fcb
MH
1642 events[2] |= 0x80;
1643
d62e6d67
JH
1644 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1645}
1646
42c6b129 1647static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1648{
42c6b129 1649 struct hci_dev *hdev = req->hdev;
d2c5d77f 1650 u8 p;
42c6b129 1651
0da71f1b
MH
1652 hci_setup_event_mask(req);
1653
b8f4e068
GP
1654 /* Some Broadcom based Bluetooth controllers do not support the
1655 * Delete Stored Link Key command. They are clearly indicating its
1656 * absence in the bit mask of supported commands.
1657 *
1658 * Check the supported commands and only if the the command is marked
1659 * as supported send it. If not supported assume that the controller
1660 * does not have actual support for stored link keys which makes this
1661 * command redundant anyway.
f9f462fa
MH
1662 *
1663 * Some controllers indicate that they support handling deleting
1664 * stored link keys, but they don't. The quirk lets a driver
1665 * just disable this command.
637b4cae 1666 */
f9f462fa
MH
1667 if (hdev->commands[6] & 0x80 &&
1668 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1669 struct hci_cp_delete_stored_link_key cp;
1670
1671 bacpy(&cp.bdaddr, BDADDR_ANY);
1672 cp.delete_all = 0x01;
1673 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1674 sizeof(cp), &cp);
1675 }
1676
2177bab5 1677 if (hdev->commands[5] & 0x10)
42c6b129 1678 hci_setup_link_policy(req);
2177bab5 1679
9193c6e8
AG
1680 if (lmp_le_capable(hdev)) {
1681 u8 events[8];
1682
1683 memset(events, 0, sizeof(events));
4d6c705b
MH
1684 events[0] = 0x0f;
1685
1686 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1687 events[0] |= 0x10; /* LE Long Term Key Request */
662bc2e6
AG
1688
1689 /* If controller supports the Connection Parameters Request
1690 * Link Layer Procedure, enable the corresponding event.
1691 */
1692 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1693 events[0] |= 0x20; /* LE Remote Connection
1694 * Parameter Request
1695 */
1696
9193c6e8
AG
1697 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1698 events);
1699
15a49cca
MH
1700 if (hdev->commands[25] & 0x40) {
1701 /* Read LE Advertising Channel TX Power */
1702 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1703 }
1704
42c6b129 1705 hci_set_le_support(req);
9193c6e8 1706 }
d2c5d77f
JH
1707
1708 /* Read features beyond page 1 if available */
1709 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1710 struct hci_cp_read_local_ext_features cp;
1711
1712 cp.page = p;
1713 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1714 sizeof(cp), &cp);
1715 }
2177bab5
JH
1716}
1717
5d4e7e8d
JH
1718static void hci_init4_req(struct hci_request *req, unsigned long opt)
1719{
1720 struct hci_dev *hdev = req->hdev;
1721
d62e6d67
JH
1722 /* Set event mask page 2 if the HCI command for it is supported */
1723 if (hdev->commands[22] & 0x04)
1724 hci_set_event_mask_page_2(req);
1725
109e3191
MH
1726 /* Read local codec list if the HCI command is supported */
1727 if (hdev->commands[29] & 0x20)
1728 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1729
f4fe73ed
MH
1730 /* Get MWS transport configuration if the HCI command is supported */
1731 if (hdev->commands[30] & 0x08)
1732 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1733
5d4e7e8d 1734 /* Check for Synchronization Train support */
53b834d2 1735 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1736 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1737
1738 /* Enable Secure Connections if supported and configured */
5afeac14 1739 if ((lmp_sc_capable(hdev) ||
111902f7 1740 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
a6d0d690
MH
1741 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1742 u8 support = 0x01;
1743 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1744 sizeof(support), &support);
1745 }
5d4e7e8d
JH
1746}
1747
2177bab5
JH
1748static int __hci_init(struct hci_dev *hdev)
1749{
1750 int err;
1751
1752 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1753 if (err < 0)
1754 return err;
1755
4b4148e9
MH
1756 /* The Device Under Test (DUT) mode is special and available for
1757 * all controller types. So just create it early on.
1758 */
1759 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1760 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1761 &dut_mode_fops);
1762 }
1763
2177bab5
JH
1764 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1765 * BR/EDR/LE type controllers. AMP controllers only need the
1766 * first stage init.
1767 */
1768 if (hdev->dev_type != HCI_BREDR)
1769 return 0;
1770
1771 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
5d4e7e8d
JH
1775 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1776 if (err < 0)
1777 return err;
1778
baf27f6e
MH
1779 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1780 if (err < 0)
1781 return err;
1782
1783 /* Only create debugfs entries during the initial setup
1784 * phase and not every time the controller gets powered on.
1785 */
1786 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1787 return 0;
1788
dfb826a8
MH
1789 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1790 &features_fops);
ceeb3bc0
MH
1791 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1792 &hdev->manufacturer);
1793 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1794 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
40f4938a
MH
1795 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1796 &device_list_fops);
70afe0b8
MH
1797 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1798 &blacklist_fops);
47219839
MH
1799 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1800
31ad1691
AK
1801 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1802 &conn_info_min_age_fops);
1803 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1804 &conn_info_max_age_fops);
1805
baf27f6e
MH
1806 if (lmp_bredr_capable(hdev)) {
1807 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1808 hdev, &inquiry_cache_fops);
02d08d15
MH
1809 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1810 hdev, &link_keys_fops);
babdbb3c
MH
1811 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1812 hdev, &dev_class_fops);
041000b9
MH
1813 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1814 hdev, &voice_setting_fops);
baf27f6e
MH
1815 }
1816
06f5b778 1817 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1818 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1819 hdev, &auto_accept_delay_fops);
5afeac14
MH
1820 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1821 hdev, &force_sc_support_fops);
134c2a89
MH
1822 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1823 hdev, &sc_only_mode_fops);
06f5b778 1824 }
ebd1e33b 1825
2bfa3531
MH
1826 if (lmp_sniff_capable(hdev)) {
1827 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1828 hdev, &idle_timeout_fops);
1829 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1830 hdev, &sniff_min_interval_fops);
1831 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1832 hdev, &sniff_max_interval_fops);
1833 }
1834
d0f729b8 1835 if (lmp_le_capable(hdev)) {
ac345813
MH
1836 debugfs_create_file("identity", 0400, hdev->debugfs,
1837 hdev, &identity_fops);
1838 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1839 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1840 debugfs_create_file("random_address", 0444, hdev->debugfs,
1841 hdev, &random_address_fops);
b32bba6c
MH
1842 debugfs_create_file("static_address", 0444, hdev->debugfs,
1843 hdev, &static_address_fops);
1844
1845 /* For controllers with a public address, provide a debug
1846 * option to force the usage of the configured static
1847 * address. By default the public address is used.
1848 */
1849 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1850 debugfs_create_file("force_static_address", 0644,
1851 hdev->debugfs, hdev,
1852 &force_static_address_fops);
1853
d0f729b8
MH
1854 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1855 &hdev->le_white_list_size);
d2ab0ac1
MH
1856 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1857 &white_list_fops);
3698d704
MH
1858 debugfs_create_file("identity_resolving_keys", 0400,
1859 hdev->debugfs, hdev,
1860 &identity_resolving_keys_fops);
8f8625cd
MH
1861 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1862 hdev, &long_term_keys_fops);
4e70c7e7
MH
1863 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1864 hdev, &conn_min_interval_fops);
1865 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1866 hdev, &conn_max_interval_fops);
816a93d1
MH
1867 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1868 hdev, &conn_latency_fops);
f1649577
MH
1869 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1870 hdev, &supervision_timeout_fops);
3f959d46
MH
1871 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1872 hdev, &adv_channel_map_fops);
729a1051
GL
1873 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1874 hdev, &adv_min_interval_fops);
1875 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1876 hdev, &adv_max_interval_fops);
b9a7a61e
LR
1877 debugfs_create_u16("discov_interleaved_timeout", 0644,
1878 hdev->debugfs,
1879 &hdev->discov_interleaved_timeout);
54506918 1880
711eafe3 1881 smp_register(hdev);
d0f729b8 1882 }
e7b8fc92 1883
baf27f6e 1884 return 0;
2177bab5
JH
1885}
1886
0ebca7d6
MH
1887static void hci_init0_req(struct hci_request *req, unsigned long opt)
1888{
1889 struct hci_dev *hdev = req->hdev;
1890
1891 BT_DBG("%s %ld", hdev->name, opt);
1892
1893 /* Reset */
1894 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1895 hci_reset_req(req, 0);
1896
1897 /* Read Local Version */
1898 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1899
1900 /* Read BD Address */
1901 if (hdev->set_bdaddr)
1902 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1903}
1904
1905static int __hci_unconf_init(struct hci_dev *hdev)
1906{
1907 int err;
1908
cc78b44b
MH
1909 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1910 return 0;
1911
0ebca7d6
MH
1912 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1913 if (err < 0)
1914 return err;
1915
1916 return 0;
1917}
1918
42c6b129 1919static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1920{
1921 __u8 scan = opt;
1922
42c6b129 1923 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1924
1925 /* Inquiry and Page scans */
42c6b129 1926 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1927}
1928
42c6b129 1929static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1930{
1931 __u8 auth = opt;
1932
42c6b129 1933 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1934
1935 /* Authentication */
42c6b129 1936 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1937}
1938
42c6b129 1939static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1940{
1941 __u8 encrypt = opt;
1942
42c6b129 1943 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1944
e4e8e37c 1945 /* Encryption */
42c6b129 1946 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1947}
1948
42c6b129 1949static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1950{
1951 __le16 policy = cpu_to_le16(opt);
1952
42c6b129 1953 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1954
1955 /* Default link policy */
42c6b129 1956 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1957}
1958
8e87d142 1959/* Get HCI device by index.
1da177e4
LT
1960 * Device is held on return. */
1961struct hci_dev *hci_dev_get(int index)
1962{
8035ded4 1963 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1964
1965 BT_DBG("%d", index);
1966
1967 if (index < 0)
1968 return NULL;
1969
1970 read_lock(&hci_dev_list_lock);
8035ded4 1971 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1972 if (d->id == index) {
1973 hdev = hci_dev_hold(d);
1974 break;
1975 }
1976 }
1977 read_unlock(&hci_dev_list_lock);
1978 return hdev;
1979}
1da177e4
LT
1980
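/* Editorial note (a sketch of the contract, not from the original
 * source): hci_dev_get() returns the device with its reference count
 * raised via hci_dev_hold(), so every successful call must be balanced
 * by hci_dev_put(), as the ioctl helpers below do in their done: paths.
 */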
1981/* ---- Inquiry support ---- */
ff9ef578 1982
30dc78e1
JH
1983bool hci_discovery_active(struct hci_dev *hdev)
1984{
1985 struct discovery_state *discov = &hdev->discovery;
1986
6fbe195d 1987 switch (discov->state) {
343f935b 1988 case DISCOVERY_FINDING:
6fbe195d 1989 case DISCOVERY_RESOLVING:
30dc78e1
JH
1990 return true;
1991
6fbe195d
AG
1992 default:
1993 return false;
1994 }
30dc78e1
JH
1995}
1996
ff9ef578
JH
1997void hci_discovery_set_state(struct hci_dev *hdev, int state)
1998{
bb3e0a33
JH
1999 int old_state = hdev->discovery.state;
2000
ff9ef578
JH
2001 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2002
bb3e0a33 2003 if (old_state == state)
ff9ef578
JH
2004 return;
2005
bb3e0a33
JH
2006 hdev->discovery.state = state;
2007
ff9ef578
JH
2008 switch (state) {
2009 case DISCOVERY_STOPPED:
c54c3860
AG
2010 hci_update_background_scan(hdev);
2011
bb3e0a33 2012 if (old_state != DISCOVERY_STARTING)
7b99b659 2013 mgmt_discovering(hdev, 0);
ff9ef578
JH
2014 break;
2015 case DISCOVERY_STARTING:
2016 break;
343f935b 2017 case DISCOVERY_FINDING:
ff9ef578
JH
2018 mgmt_discovering(hdev, 1);
2019 break;
30dc78e1
JH
2020 case DISCOVERY_RESOLVING:
2021 break;
ff9ef578
JH
2022 case DISCOVERY_STOPPING:
2023 break;
2024 }
ff9ef578
JH
2025}
2026
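/* Editorial note (not part of the original source): the states set
 * above normally progress as
 *
 *   STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * and mgmt_discovering() is only emitted on the transition into
 * FINDING (discovering = 1) and on the transition into STOPPED
 * (discovering = 0), the latter being suppressed when the previous
 * state was only STARTING.
 */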
1f9b9a5d 2027void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2028{
30883512 2029 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2030 struct inquiry_entry *p, *n;
1da177e4 2031
561aafbc
JH
2032 list_for_each_entry_safe(p, n, &cache->all, all) {
2033 list_del(&p->all);
b57c1a56 2034 kfree(p);
1da177e4 2035 }
561aafbc
JH
2036
2037 INIT_LIST_HEAD(&cache->unknown);
2038 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2039}
2040
a8c5fb1a
GP
2041struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2042 bdaddr_t *bdaddr)
1da177e4 2043{
30883512 2044 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2045 struct inquiry_entry *e;
2046
6ed93dc6 2047 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2048
561aafbc
JH
2049 list_for_each_entry(e, &cache->all, all) {
2050 if (!bacmp(&e->data.bdaddr, bdaddr))
2051 return e;
2052 }
2053
2054 return NULL;
2055}
2056
2057struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2058 bdaddr_t *bdaddr)
561aafbc 2059{
30883512 2060 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2061 struct inquiry_entry *e;
2062
6ed93dc6 2063 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2064
2065 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2066 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2067 return e;
2068 }
2069
2070 return NULL;
1da177e4
LT
2071}
2072
30dc78e1 2073struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2074 bdaddr_t *bdaddr,
2075 int state)
30dc78e1
JH
2076{
2077 struct discovery_state *cache = &hdev->discovery;
2078 struct inquiry_entry *e;
2079
6ed93dc6 2080 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2081
2082 list_for_each_entry(e, &cache->resolve, list) {
2083 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2084 return e;
2085 if (!bacmp(&e->data.bdaddr, bdaddr))
2086 return e;
2087 }
2088
2089 return NULL;
2090}
2091
a3d4e20a 2092void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2093 struct inquiry_entry *ie)
a3d4e20a
JH
2094{
2095 struct discovery_state *cache = &hdev->discovery;
2096 struct list_head *pos = &cache->resolve;
2097 struct inquiry_entry *p;
2098
2099 list_del(&ie->list);
2100
2101 list_for_each_entry(p, &cache->resolve, list) {
2102 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2103 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2104 break;
2105 pos = &p->list;
2106 }
2107
2108 list_add(&ie->list, pos);
2109}
2110
af58925c
MH
2111u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2112 bool name_known)
1da177e4 2113{
30883512 2114 struct discovery_state *cache = &hdev->discovery;
70f23020 2115 struct inquiry_entry *ie;
af58925c 2116 u32 flags = 0;
1da177e4 2117
6ed93dc6 2118 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2119
2b2fec4d
SJ
2120 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2121
af58925c
MH
2122 if (!data->ssp_mode)
2123 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2124
70f23020 2125 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2126 if (ie) {
af58925c
MH
2127 if (!ie->data.ssp_mode)
2128 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2129
a3d4e20a 2130 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2131 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2132 ie->data.rssi = data->rssi;
2133 hci_inquiry_cache_update_resolve(hdev, ie);
2134 }
2135
561aafbc 2136 goto update;
a3d4e20a 2137 }
561aafbc
JH
2138
2139 /* Entry not in the cache. Add new one. */
27f70f3e 2140 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2141 if (!ie) {
2142 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2143 goto done;
2144 }
561aafbc
JH
2145
2146 list_add(&ie->all, &cache->all);
2147
2148 if (name_known) {
2149 ie->name_state = NAME_KNOWN;
2150 } else {
2151 ie->name_state = NAME_NOT_KNOWN;
2152 list_add(&ie->list, &cache->unknown);
2153 }
70f23020 2154
561aafbc
JH
2155update:
2156 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2157 ie->name_state != NAME_PENDING) {
561aafbc
JH
2158 ie->name_state = NAME_KNOWN;
2159 list_del(&ie->list);
1da177e4
LT
2160 }
2161
70f23020
AE
2162 memcpy(&ie->data, data, sizeof(*data));
2163 ie->timestamp = jiffies;
1da177e4 2164 cache->timestamp = jiffies;
3175405b
JH
2165
2166 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2167 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2168
af58925c
MH
2169done:
2170 return flags;
1da177e4
LT
2171}
2172
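/* Editorial note (not part of the original source): the returned flag
 * bits (MGMT_DEV_FOUND_CONFIRM_NAME, MGMT_DEV_FOUND_LEGACY_PAIRING)
 * are not consumed here; the inquiry result event handlers are
 * expected to pass them on with the management Device Found event so
 * that userspace knows whether a name confirmation round is still
 * needed.
 */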
2173static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2174{
30883512 2175 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2176 struct inquiry_info *info = (struct inquiry_info *) buf;
2177 struct inquiry_entry *e;
2178 int copied = 0;
2179
561aafbc 2180 list_for_each_entry(e, &cache->all, all) {
1da177e4 2181 struct inquiry_data *data = &e->data;
b57c1a56
JH
2182
2183 if (copied >= num)
2184 break;
2185
1da177e4
LT
2186 bacpy(&info->bdaddr, &data->bdaddr);
2187 info->pscan_rep_mode = data->pscan_rep_mode;
2188 info->pscan_period_mode = data->pscan_period_mode;
2189 info->pscan_mode = data->pscan_mode;
2190 memcpy(info->dev_class, data->dev_class, 3);
2191 info->clock_offset = data->clock_offset;
b57c1a56 2192
1da177e4 2193 info++;
b57c1a56 2194 copied++;
1da177e4
LT
2195 }
2196
2197 BT_DBG("cache %p, copied %d", cache, copied);
2198 return copied;
2199}
2200
42c6b129 2201static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2202{
2203 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2204 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2205 struct hci_cp_inquiry cp;
2206
2207 BT_DBG("%s", hdev->name);
2208
2209 if (test_bit(HCI_INQUIRY, &hdev->flags))
2210 return;
2211
2212 /* Start Inquiry */
2213 memcpy(&cp.lap, &ir->lap, 3);
2214 cp.length = ir->length;
2215 cp.num_rsp = ir->num_rsp;
42c6b129 2216 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2217}
2218
2219int hci_inquiry(void __user *arg)
2220{
2221 __u8 __user *ptr = arg;
2222 struct hci_inquiry_req ir;
2223 struct hci_dev *hdev;
2224 int err = 0, do_inquiry = 0, max_rsp;
2225 long timeo;
2226 __u8 *buf;
2227
2228 if (copy_from_user(&ir, ptr, sizeof(ir)))
2229 return -EFAULT;
2230
5a08ecce
AE
2231 hdev = hci_dev_get(ir.dev_id);
2232 if (!hdev)
1da177e4
LT
2233 return -ENODEV;
2234
0736cfa8
MH
2235 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2236 err = -EBUSY;
2237 goto done;
2238 }
2239
4a964404 2240 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2241 err = -EOPNOTSUPP;
2242 goto done;
2243 }
2244
5b69bef5
MH
2245 if (hdev->dev_type != HCI_BREDR) {
2246 err = -EOPNOTSUPP;
2247 goto done;
2248 }
2249
56f87901
JH
2250 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2251 err = -EOPNOTSUPP;
2252 goto done;
2253 }
2254
09fd0de5 2255 hci_dev_lock(hdev);
8e87d142 2256 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2257 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2258 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2259 do_inquiry = 1;
2260 }
09fd0de5 2261 hci_dev_unlock(hdev);
1da177e4 2262
04837f64 2263 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2264
2265 if (do_inquiry) {
01178cd4
JH
2266 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2267 timeo);
70f23020
AE
2268 if (err < 0)
2269 goto done;
3e13fa1e
AG
2270
2271 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2272 * cleared). If it is interrupted by a signal, return -EINTR.
2273 */
74316201 2274 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2275 TASK_INTERRUPTIBLE))
2276 return -EINTR;
70f23020 2277 }
1da177e4 2278
8fc9ced3
GP
2279 /* For an unlimited number of responses we will use a buffer with
2280 * 255 entries
2281 */
1da177e4
LT
2282 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2283
2284 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2285 * copy it to user space.
2286 */
01df8c31 2287 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2288 if (!buf) {
1da177e4
LT
2289 err = -ENOMEM;
2290 goto done;
2291 }
2292
09fd0de5 2293 hci_dev_lock(hdev);
1da177e4 2294 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2295 hci_dev_unlock(hdev);
1da177e4
LT
2296
2297 BT_DBG("num_rsp %d", ir.num_rsp);
2298
2299 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2300 ptr += sizeof(ir);
2301 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2302 ir.num_rsp))
1da177e4 2303 err = -EFAULT;
8e87d142 2304 } else
1da177e4
LT
2305 err = -EFAULT;
2306
2307 kfree(buf);
2308
2309done:
2310 hci_dev_put(hdev);
2311 return err;
2312}
2313
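/* Userspace counterpart (an illustrative sketch, not part of this
 * file, assuming the BlueZ libbluetooth headers and helpers; build
 * with something like "cc scan.c -lbluetooth"). The HCIINQUIRY ioctl
 * implemented by the kernel function above is what libbluetooth's
 * identically named hci_inquiry() wrapper issues, including the
 * IREQ_CACHE_FLUSH flag checked against ir.flags in the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
	inquiry_info *ii;
	int dev_id, dd, num_rsp, i;
	char addr[19] = { 0 };

	dev_id = hci_get_route(NULL);	/* first usable adapter */
	dd = hci_open_dev(dev_id);
	if (dev_id < 0 || dd < 0) {
		perror("hci_open_dev");
		return 1;
	}

	/* Mirror the kernel's "unlimited means 255 entries" convention */
	ii = malloc(255 * sizeof(inquiry_info));
	if (!ii) {
		close(dd);
		return 1;
	}

	/* 8 * 1.28s inquiry, up to 255 responses, flush the cache first */
	num_rsp = hci_inquiry(dev_id, 8, 255, NULL, &ii, IREQ_CACHE_FLUSH);
	if (num_rsp < 0)
		perror("hci_inquiry");

	for (i = 0; i < num_rsp; i++) {
		ba2str(&(ii + i)->bdaddr, addr);
		printf("%s\n", addr);
	}

	free(ii);
	close(dd);
	return 0;
}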
cbed0ca1 2314static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2315{
1da177e4
LT
2316 int ret = 0;
2317
1da177e4
LT
2318 BT_DBG("%s %p", hdev->name, hdev);
2319
2320 hci_req_lock(hdev);
2321
94324962
JH
2322 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2323 ret = -ENODEV;
2324 goto done;
2325 }
2326
d603b76b
MH
2327 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2328 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2329 /* Check for rfkill but allow the HCI setup stage to
2330 * proceed (which in itself doesn't cause any RF activity).
2331 */
2332 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2333 ret = -ERFKILL;
2334 goto done;
2335 }
2336
2337 /* Check for valid public address or a configured static
2338 * random address, but let the HCI setup proceed to
2339 * be able to determine if there is a public address
2340 * or not.
2341 *
c6beca0e
MH
2342 * In case of user channel usage, it is not important
2343 * if a public address or static random address is
2344 * available.
2345 *
a5c8f270
MH
2346 * This check is only valid for BR/EDR controllers
2347 * since AMP controllers do not have an address.
2348 */
c6beca0e
MH
2349 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2350 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2351 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2352 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2353 ret = -EADDRNOTAVAIL;
2354 goto done;
2355 }
611b30f7
MH
2356 }
2357
1da177e4
LT
2358 if (test_bit(HCI_UP, &hdev->flags)) {
2359 ret = -EALREADY;
2360 goto done;
2361 }
2362
1da177e4
LT
2363 if (hdev->open(hdev)) {
2364 ret = -EIO;
2365 goto done;
2366 }
2367
f41c70c4
MH
2368 atomic_set(&hdev->cmd_cnt, 1);
2369 set_bit(HCI_INIT, &hdev->flags);
2370
af202f84
MH
2371 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2372 if (hdev->setup)
2373 ret = hdev->setup(hdev);
f41c70c4 2374
af202f84
MH
2375 /* The transport driver can set these quirks before
2376 * creating the HCI device or in its setup callback.
2377 *
2378 * In case any of them is set, the controller has to
2379 * start up as unconfigured.
2380 */
eb1904f4
MH
2381 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2382 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2383 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2384
0ebca7d6
MH
2385 /* For an unconfigured controller it is required to
2386 * read at least the version information provided by
2387 * the Read Local Version Information command.
2388 *
2389 * If the set_bdaddr driver callback is provided, then
2390 * also the original Bluetooth public device address
2391 * will be read using the Read BD Address command.
2392 */
2393 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2394 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2395 }
2396
9713c17b
MH
2397 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2398 /* If public address change is configured, ensure that
2399 * the address gets programmed. If the driver does not
2400 * support changing the public address, fail the power
2401 * on procedure.
2402 */
2403 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2404 hdev->set_bdaddr)
24c457e2
MH
2405 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2406 else
2407 ret = -EADDRNOTAVAIL;
2408 }
2409
f41c70c4 2410 if (!ret) {
4a964404 2411 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2412 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2413 ret = __hci_init(hdev);
1da177e4
LT
2414 }
2415
f41c70c4
MH
2416 clear_bit(HCI_INIT, &hdev->flags);
2417
1da177e4
LT
2418 if (!ret) {
2419 hci_dev_hold(hdev);
d6bfd59c 2420 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2421 set_bit(HCI_UP, &hdev->flags);
2422 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2423 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2424 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2425 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2426 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2427 hdev->dev_type == HCI_BREDR) {
09fd0de5 2428 hci_dev_lock(hdev);
744cf19e 2429 mgmt_powered(hdev, 1);
09fd0de5 2430 hci_dev_unlock(hdev);
56e5cb86 2431 }
8e87d142 2432 } else {
1da177e4 2433 /* Init failed, cleanup */
3eff45ea 2434 flush_work(&hdev->tx_work);
c347b765 2435 flush_work(&hdev->cmd_work);
b78752cc 2436 flush_work(&hdev->rx_work);
1da177e4
LT
2437
2438 skb_queue_purge(&hdev->cmd_q);
2439 skb_queue_purge(&hdev->rx_q);
2440
2441 if (hdev->flush)
2442 hdev->flush(hdev);
2443
2444 if (hdev->sent_cmd) {
2445 kfree_skb(hdev->sent_cmd);
2446 hdev->sent_cmd = NULL;
2447 }
2448
2449 hdev->close(hdev);
fee746b0 2450 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2451 }
2452
2453done:
2454 hci_req_unlock(hdev);
1da177e4
LT
2455 return ret;
2456}
2457
cbed0ca1
JH
2458/* ---- HCI ioctl helpers ---- */
2459
2460int hci_dev_open(__u16 dev)
2461{
2462 struct hci_dev *hdev;
2463 int err;
2464
2465 hdev = hci_dev_get(dev);
2466 if (!hdev)
2467 return -ENODEV;
2468
4a964404 2469 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2470 * up as user channel. Trying to bring them up as normal devices
2471 * will result in a failure. Only user channel operation is
2472 * possible.
2473 *
2474 * When this function is called for a user channel, the flag
2475 * HCI_USER_CHANNEL will be set first before attempting to
2476 * open the device.
2477 */
4a964404 2478 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2479 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2480 err = -EOPNOTSUPP;
2481 goto done;
2482 }
2483
e1d08f40
JH
2484 /* We need to ensure that no other power on/off work is pending
2485 * before proceeding to call hci_dev_do_open. This is
2486 * particularly important if the setup procedure has not yet
2487 * completed.
2488 */
2489 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2490 cancel_delayed_work(&hdev->power_off);
2491
a5c8f270
MH
2492 /* After this call it is guaranteed that the setup procedure
2493 * has finished. This means that error conditions like RFKILL
2494 * or no valid public or static random address apply.
2495 */
e1d08f40
JH
2496 flush_workqueue(hdev->req_workqueue);
2497
12aa4f0a 2498 /* For controllers not using the management interface and that
b6ae8457 2499 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2500 * so that pairing works for them. Once the management interface
2501 * is in use this bit will be cleared again and userspace has
2502 * to explicitly enable it.
2503 */
2504 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2505 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2506 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2507
cbed0ca1
JH
2508 err = hci_dev_do_open(hdev);
2509
fee746b0 2510done:
cbed0ca1 2511 hci_dev_put(hdev);
cbed0ca1
JH
2512 return err;
2513}
2514
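/* Userspace counterpart (an illustrative sketch, not part of this
 * file): hci_dev_open() above backs the HCIDEVUP ioctl on a raw HCI
 * control socket, which is how tools like hciconfig bring an adapter
 * up.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int bring_up_hci0(void)
{
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0) {
		perror("socket");
		return -1;
	}

	/* The ioctl argument is the numeric device id (0 for hci0);
	 * EALREADY just means the adapter was already up.
	 */
	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY) {
		perror("HCIDEVUP");
		close(ctl);
		return -1;
	}

	close(ctl);
	return 0;
}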
d7347f3c
JH
2515/* This function requires the caller holds hdev->lock */
2516static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2517{
2518 struct hci_conn_params *p;
2519
f161dd41
JH
2520 list_for_each_entry(p, &hdev->le_conn_params, list) {
2521 if (p->conn) {
2522 hci_conn_drop(p->conn);
f8aaf9b6 2523 hci_conn_put(p->conn);
f161dd41
JH
2524 p->conn = NULL;
2525 }
d7347f3c 2526 list_del_init(&p->action);
f161dd41 2527 }
d7347f3c
JH
2528
2529 BT_DBG("All LE pending actions cleared");
2530}
2531
1da177e4
LT
2532static int hci_dev_do_close(struct hci_dev *hdev)
2533{
2534 BT_DBG("%s %p", hdev->name, hdev);
2535
78c04c0b
VCG
2536 cancel_delayed_work(&hdev->power_off);
2537
1da177e4
LT
2538 hci_req_cancel(hdev, ENODEV);
2539 hci_req_lock(hdev);
2540
2541 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2542 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2543 hci_req_unlock(hdev);
2544 return 0;
2545 }
2546
3eff45ea
GP
2547 /* Flush RX and TX works */
2548 flush_work(&hdev->tx_work);
b78752cc 2549 flush_work(&hdev->rx_work);
1da177e4 2550
16ab91ab 2551 if (hdev->discov_timeout > 0) {
e0f9309f 2552 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2553 hdev->discov_timeout = 0;
5e5282bb 2554 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2555 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2556 }
2557
a8b2d5c2 2558 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2559 cancel_delayed_work(&hdev->service_cache);
2560
7ba8b4be 2561 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2562
2563 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2564 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2565
76727c02
JH
2566 /* Avoid potential lockdep warnings from the *_flush() calls by
2567 * ensuring the workqueue is empty up front.
2568 */
2569 drain_workqueue(hdev->workqueue);
2570
09fd0de5 2571 hci_dev_lock(hdev);
1f9b9a5d 2572 hci_inquiry_cache_flush(hdev);
d7347f3c 2573 hci_pend_le_actions_clear(hdev);
f161dd41 2574 hci_conn_hash_flush(hdev);
09fd0de5 2575 hci_dev_unlock(hdev);
1da177e4
LT
2576
2577 hci_notify(hdev, HCI_DEV_DOWN);
2578
2579 if (hdev->flush)
2580 hdev->flush(hdev);
2581
2582 /* Reset device */
2583 skb_queue_purge(&hdev->cmd_q);
2584 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2585 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2586 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2587 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2588 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2589 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2590 clear_bit(HCI_INIT, &hdev->flags);
2591 }
2592
c347b765
GP
2593 /* flush cmd work */
2594 flush_work(&hdev->cmd_work);
1da177e4
LT
2595
2596 /* Drop queues */
2597 skb_queue_purge(&hdev->rx_q);
2598 skb_queue_purge(&hdev->cmd_q);
2599 skb_queue_purge(&hdev->raw_q);
2600
2601 /* Drop last sent command */
2602 if (hdev->sent_cmd) {
65cc2b49 2603 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2604 kfree_skb(hdev->sent_cmd);
2605 hdev->sent_cmd = NULL;
2606 }
2607
b6ddb638
JH
2608 kfree_skb(hdev->recv_evt);
2609 hdev->recv_evt = NULL;
2610
1da177e4
LT
2611 /* After this point our queues are empty
2612 * and no tasks are scheduled. */
2613 hdev->close(hdev);
2614
35b973c9 2615 /* Clear flags */
fee746b0 2616 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2617 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2618
93c311a0
MH
2619 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2620 if (hdev->dev_type == HCI_BREDR) {
2621 hci_dev_lock(hdev);
2622 mgmt_powered(hdev, 0);
2623 hci_dev_unlock(hdev);
2624 }
8ee56540 2625 }
5add6af8 2626
ced5c338 2627 /* Controller radio is available but is currently powered down */
536619e8 2628 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2629
e59fda8d 2630 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2631 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2632 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2633
1da177e4
LT
2634 hci_req_unlock(hdev);
2635
2636 hci_dev_put(hdev);
2637 return 0;
2638}
2639
2640int hci_dev_close(__u16 dev)
2641{
2642 struct hci_dev *hdev;
2643 int err;
2644
70f23020
AE
2645 hdev = hci_dev_get(dev);
2646 if (!hdev)
1da177e4 2647 return -ENODEV;
8ee56540 2648
0736cfa8
MH
2649 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2650 err = -EBUSY;
2651 goto done;
2652 }
2653
8ee56540
MH
2654 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2655 cancel_delayed_work(&hdev->power_off);
2656
1da177e4 2657 err = hci_dev_do_close(hdev);
8ee56540 2658
0736cfa8 2659done:
1da177e4
LT
2660 hci_dev_put(hdev);
2661 return err;
2662}
2663
2664int hci_dev_reset(__u16 dev)
2665{
2666 struct hci_dev *hdev;
2667 int ret = 0;
2668
70f23020
AE
2669 hdev = hci_dev_get(dev);
2670 if (!hdev)
1da177e4
LT
2671 return -ENODEV;
2672
2673 hci_req_lock(hdev);
1da177e4 2674
808a049e
MH
2675 if (!test_bit(HCI_UP, &hdev->flags)) {
2676 ret = -ENETDOWN;
1da177e4 2677 goto done;
808a049e 2678 }
1da177e4 2679
0736cfa8
MH
2680 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2681 ret = -EBUSY;
2682 goto done;
2683 }
2684
4a964404 2685 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2686 ret = -EOPNOTSUPP;
2687 goto done;
2688 }
2689
1da177e4
LT
2690 /* Drop queues */
2691 skb_queue_purge(&hdev->rx_q);
2692 skb_queue_purge(&hdev->cmd_q);
2693
76727c02
JH
2694 /* Avoid potential lockdep warnings from the *_flush() calls by
2695 * ensuring the workqueue is empty up front.
2696 */
2697 drain_workqueue(hdev->workqueue);
2698
09fd0de5 2699 hci_dev_lock(hdev);
1f9b9a5d 2700 hci_inquiry_cache_flush(hdev);
1da177e4 2701 hci_conn_hash_flush(hdev);
09fd0de5 2702 hci_dev_unlock(hdev);
1da177e4
LT
2703
2704 if (hdev->flush)
2705 hdev->flush(hdev);
2706
8e87d142 2707 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2708 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2709
fee746b0 2710 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2711
2712done:
1da177e4
LT
2713 hci_req_unlock(hdev);
2714 hci_dev_put(hdev);
2715 return ret;
2716}
2717
2718int hci_dev_reset_stat(__u16 dev)
2719{
2720 struct hci_dev *hdev;
2721 int ret = 0;
2722
70f23020
AE
2723 hdev = hci_dev_get(dev);
2724 if (!hdev)
1da177e4
LT
2725 return -ENODEV;
2726
0736cfa8
MH
2727 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2728 ret = -EBUSY;
2729 goto done;
2730 }
2731
4a964404 2732 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2733 ret = -EOPNOTSUPP;
2734 goto done;
2735 }
2736
1da177e4
LT
2737 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2738
0736cfa8 2739done:
1da177e4 2740 hci_dev_put(hdev);
1da177e4
LT
2741 return ret;
2742}
2743
123abc08
JH
2744static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2745{
bc6d2d04 2746 bool conn_changed, discov_changed;
123abc08
JH
2747
2748 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2749
2750 if ((scan & SCAN_PAGE))
2751 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2752 &hdev->dev_flags);
2753 else
2754 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2755 &hdev->dev_flags);
2756
bc6d2d04
JH
2757 if ((scan & SCAN_INQUIRY)) {
2758 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2759 &hdev->dev_flags);
2760 } else {
2761 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2762 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2763 &hdev->dev_flags);
2764 }
2765
123abc08
JH
2766 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2767 return;
2768
bc6d2d04
JH
2769 if (conn_changed || discov_changed) {
2770 /* In case this was disabled through mgmt */
2771 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2772
2773 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2774 mgmt_update_adv_data(hdev);
2775
123abc08 2776 mgmt_new_settings(hdev);
bc6d2d04 2777 }
123abc08
JH
2778}
2779
1da177e4
LT
2780int hci_dev_cmd(unsigned int cmd, void __user *arg)
2781{
2782 struct hci_dev *hdev;
2783 struct hci_dev_req dr;
2784 int err = 0;
2785
2786 if (copy_from_user(&dr, arg, sizeof(dr)))
2787 return -EFAULT;
2788
70f23020
AE
2789 hdev = hci_dev_get(dr.dev_id);
2790 if (!hdev)
1da177e4
LT
2791 return -ENODEV;
2792
0736cfa8
MH
2793 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2794 err = -EBUSY;
2795 goto done;
2796 }
2797
4a964404 2798 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2799 err = -EOPNOTSUPP;
2800 goto done;
2801 }
2802
5b69bef5
MH
2803 if (hdev->dev_type != HCI_BREDR) {
2804 err = -EOPNOTSUPP;
2805 goto done;
2806 }
2807
56f87901
JH
2808 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2809 err = -EOPNOTSUPP;
2810 goto done;
2811 }
2812
1da177e4
LT
2813 switch (cmd) {
2814 case HCISETAUTH:
01178cd4
JH
2815 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2816 HCI_INIT_TIMEOUT);
1da177e4
LT
2817 break;
2818
2819 case HCISETENCRYPT:
2820 if (!lmp_encrypt_capable(hdev)) {
2821 err = -EOPNOTSUPP;
2822 break;
2823 }
2824
2825 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2826 /* Auth must be enabled first */
01178cd4
JH
2827 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2828 HCI_INIT_TIMEOUT);
1da177e4
LT
2829 if (err)
2830 break;
2831 }
2832
01178cd4
JH
2833 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2834 HCI_INIT_TIMEOUT);
1da177e4
LT
2835 break;
2836
2837 case HCISETSCAN:
01178cd4
JH
2838 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2839 HCI_INIT_TIMEOUT);
91a668b0 2840
bc6d2d04
JH
2841 /* Ensure that the connectable and discoverable states
2842 * get correctly modified as this was a non-mgmt change.
91a668b0 2843 */
123abc08
JH
2844 if (!err)
2845 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2846 break;
2847
1da177e4 2848 case HCISETLINKPOL:
01178cd4
JH
2849 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2850 HCI_INIT_TIMEOUT);
1da177e4
LT
2851 break;
2852
2853 case HCISETLINKMODE:
e4e8e37c
MH
2854 hdev->link_mode = ((__u16) dr.dev_opt) &
2855 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2856 break;
2857
2858 case HCISETPTYPE:
2859 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2860 break;
2861
2862 case HCISETACLMTU:
e4e8e37c
MH
2863 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2864 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2865 break;
2866
2867 case HCISETSCOMTU:
e4e8e37c
MH
2868 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2869 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2870 break;
2871
2872 default:
2873 err = -EINVAL;
2874 break;
2875 }
e4e8e37c 2876
0736cfa8 2877done:
1da177e4
LT
2878 hci_dev_put(hdev);
2879 return err;
2880}
2881
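/* Userspace counterpart (an illustrative sketch, not part of this
 * file): the HCISETSCAN branch above is what "hciconfig hci0 piscan"
 * uses; struct hci_dev_req and the SCAN_* constants come from
 * <bluetooth/hci.h>. The adapter must already be up for the request
 * to succeed.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int set_piscan(int dev_id)
{
	struct hci_dev_req dr;
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return -1;

	dr.dev_id = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	/* connectable + discoverable */

	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0) {
		perror("HCISETSCAN");
		close(ctl);
		return -1;
	}

	close(ctl);
	return 0;
}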
2882int hci_get_dev_list(void __user *arg)
2883{
8035ded4 2884 struct hci_dev *hdev;
1da177e4
LT
2885 struct hci_dev_list_req *dl;
2886 struct hci_dev_req *dr;
1da177e4
LT
2887 int n = 0, size, err;
2888 __u16 dev_num;
2889
2890 if (get_user(dev_num, (__u16 __user *) arg))
2891 return -EFAULT;
2892
2893 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2894 return -EINVAL;
2895
2896 size = sizeof(*dl) + dev_num * sizeof(*dr);
2897
70f23020
AE
2898 dl = kzalloc(size, GFP_KERNEL);
2899 if (!dl)
1da177e4
LT
2900 return -ENOMEM;
2901
2902 dr = dl->dev_req;
2903
f20d09d5 2904 read_lock(&hci_dev_list_lock);
8035ded4 2905 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2906 unsigned long flags = hdev->flags;
c542a06c 2907
2e84d8db
MH
2908 /* When the auto-off is configured it means the transport
2909 * is running, but in that case still indicate that the
2910 * device is actually down.
2911 */
2912 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2913 flags &= ~BIT(HCI_UP);
c542a06c 2914
1da177e4 2915 (dr + n)->dev_id = hdev->id;
2e84d8db 2916 (dr + n)->dev_opt = flags;
c542a06c 2917
1da177e4
LT
2918 if (++n >= dev_num)
2919 break;
2920 }
f20d09d5 2921 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2922
2923 dl->dev_num = n;
2924 size = sizeof(*dl) + n * sizeof(*dr);
2925
2926 err = copy_to_user(arg, dl, size);
2927 kfree(dl);
2928
2929 return err ? -EFAULT : 0;
2930}
2931
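/* Userspace counterpart (an illustrative sketch, not part of this
 * file): enumerating adapters through the HCIGETDEVLIST ioctl served
 * by hci_get_dev_list() above. HCI_MAX_DEV is defined in
 * <bluetooth/hci.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int list_adapters(void)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int ctl, i;

	ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	if (ctl < 0)
		return -1;

	dl = malloc(HCI_MAX_DEV * sizeof(*dr) + sizeof(*dl));
	if (!dl) {
		close(ctl);
		return -1;
	}

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) < 0) {
		perror("HCIGETDEVLIST");
		free(dl);
		close(ctl);
		return -1;
	}

	/* dev_opt carries the hdev->flags snapshot built above, with
	 * HCI_UP masked out for auto-off devices.
	 */
	for (i = 0; i < dl->dev_num; i++)
		printf("hci%d flags 0x%x\n", dr[i].dev_id,
		       (unsigned int) dr[i].dev_opt);

	free(dl);
	close(ctl);
	return 0;
}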
2932int hci_get_dev_info(void __user *arg)
2933{
2934 struct hci_dev *hdev;
2935 struct hci_dev_info di;
2e84d8db 2936 unsigned long flags;
1da177e4
LT
2937 int err = 0;
2938
2939 if (copy_from_user(&di, arg, sizeof(di)))
2940 return -EFAULT;
2941
70f23020
AE
2942 hdev = hci_dev_get(di.dev_id);
2943 if (!hdev)
1da177e4
LT
2944 return -ENODEV;
2945
2e84d8db
MH
2946 /* When the auto-off is configured it means the transport
2947 * is running, but in that case still indicate that the
2948 * device is actually down.
2949 */
2950 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2951 flags = hdev->flags & ~BIT(HCI_UP);
2952 else
2953 flags = hdev->flags;
c542a06c 2954
1da177e4
LT
2955 strcpy(di.name, hdev->name);
2956 di.bdaddr = hdev->bdaddr;
60f2a3ed 2957 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2958 di.flags = flags;
1da177e4 2959 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2960 if (lmp_bredr_capable(hdev)) {
2961 di.acl_mtu = hdev->acl_mtu;
2962 di.acl_pkts = hdev->acl_pkts;
2963 di.sco_mtu = hdev->sco_mtu;
2964 di.sco_pkts = hdev->sco_pkts;
2965 } else {
2966 di.acl_mtu = hdev->le_mtu;
2967 di.acl_pkts = hdev->le_pkts;
2968 di.sco_mtu = 0;
2969 di.sco_pkts = 0;
2970 }
1da177e4
LT
2971 di.link_policy = hdev->link_policy;
2972 di.link_mode = hdev->link_mode;
2973
2974 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2975 memcpy(&di.features, &hdev->features, sizeof(di.features));
2976
2977 if (copy_to_user(arg, &di, sizeof(di)))
2978 err = -EFAULT;
2979
2980 hci_dev_put(hdev);
2981
2982 return err;
2983}
2984
2985/* ---- Interface to HCI drivers ---- */
2986
611b30f7
MH
2987static int hci_rfkill_set_block(void *data, bool blocked)
2988{
2989 struct hci_dev *hdev = data;
2990
2991 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2992
0736cfa8
MH
2993 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2994 return -EBUSY;
2995
5e130367
JH
2996 if (blocked) {
2997 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2998 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2999 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 3000 hci_dev_do_close(hdev);
5e130367
JH
3001 } else {
3002 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 3003 }
611b30f7
MH
3004
3005 return 0;
3006}
3007
3008static const struct rfkill_ops hci_rfkill_ops = {
3009 .set_block = hci_rfkill_set_block,
3010};
3011
ab81cbf9
JH
3012static void hci_power_on(struct work_struct *work)
3013{
3014 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3015 int err;
ab81cbf9
JH
3016
3017 BT_DBG("%s", hdev->name);
3018
cbed0ca1 3019 err = hci_dev_do_open(hdev);
96570ffc
JH
3020 if (err < 0) {
3021 mgmt_set_powered_failed(hdev, err);
ab81cbf9 3022 return;
96570ffc 3023 }
ab81cbf9 3024
a5c8f270
MH
3025 /* During the HCI setup phase, a few error conditions are
3026 * ignored and they need to be checked now. If they are still
3027 * valid, it is important to turn the device back off.
3028 */
3029 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3030 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3031 (hdev->dev_type == HCI_BREDR &&
3032 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3033 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3034 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3035 hci_dev_do_close(hdev);
3036 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3037 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3038 HCI_AUTO_OFF_TIMEOUT);
bf543036 3039 }
ab81cbf9 3040
fee746b0 3041 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3042 /* For unconfigured devices, set the HCI_RAW flag
3043 * so that userspace can easily identify them.
4a964404
MH
3044 */
3045 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3046 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3047
3048 /* For fully configured devices, this will send
3049 * the Index Added event. For unconfigured devices,
3050 * it will send an Unconfigured Index Added event.
3051 *
3052 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3053 * and no event will be sent.
3054 */
3055 mgmt_index_added(hdev);
d603b76b 3056 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3057 /* When the controller is now configured, then it
3058 * is important to clear the HCI_RAW flag.
3059 */
3060 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3061 clear_bit(HCI_RAW, &hdev->flags);
3062
d603b76b
MH
3063 /* Powering on the controller with HCI_CONFIG set only
3064 * happens with the transition from unconfigured to
3065 * configured. This will send the Index Added event.
3066 */
744cf19e 3067 mgmt_index_added(hdev);
fee746b0 3068 }
ab81cbf9
JH
3069}
3070
3071static void hci_power_off(struct work_struct *work)
3072{
3243553f 3073 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3074 power_off.work);
ab81cbf9
JH
3075
3076 BT_DBG("%s", hdev->name);
3077
8ee56540 3078 hci_dev_do_close(hdev);
ab81cbf9
JH
3079}
3080
16ab91ab
JH
3081static void hci_discov_off(struct work_struct *work)
3082{
3083 struct hci_dev *hdev;
16ab91ab
JH
3084
3085 hdev = container_of(work, struct hci_dev, discov_off.work);
3086
3087 BT_DBG("%s", hdev->name);
3088
d1967ff8 3089 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3090}
3091
35f7498a 3092void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3093{
4821002c 3094 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3095
4821002c
JH
3096 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3097 list_del(&uuid->list);
2aeb9a1a
JH
3098 kfree(uuid);
3099 }
2aeb9a1a
JH
3100}
3101
35f7498a 3102void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3103{
3104 struct list_head *p, *n;
3105
3106 list_for_each_safe(p, n, &hdev->link_keys) {
3107 struct link_key *key;
3108
3109 key = list_entry(p, struct link_key, list);
3110
3111 list_del(p);
3112 kfree(key);
3113 }
55ed8ca1
JH
3114}
3115
35f7498a 3116void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 3117{
970d0f1b 3118 struct smp_ltk *k;
b899efaf 3119
970d0f1b
JH
3120 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3121 list_del_rcu(&k->list);
3122 kfree_rcu(k, rcu);
b899efaf 3123 }
b899efaf
VCG
3124}
3125
970c4e46
JH
3126void hci_smp_irks_clear(struct hci_dev *hdev)
3127{
adae20cb 3128 struct smp_irk *k;
970c4e46 3129
adae20cb
JH
3130 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3131 list_del_rcu(&k->list);
3132 kfree_rcu(k, rcu);
970c4e46
JH
3133 }
3134}
3135
55ed8ca1
JH
3136struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3137{
8035ded4 3138 struct link_key *k;
55ed8ca1 3139
8035ded4 3140 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3141 if (bacmp(bdaddr, &k->bdaddr) == 0)
3142 return k;
55ed8ca1
JH
3143
3144 return NULL;
3145}
3146
745c0ce3 3147static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3148 u8 key_type, u8 old_key_type)
d25e28ab
JH
3149{
3150 /* Legacy key */
3151 if (key_type < 0x03)
745c0ce3 3152 return true;
d25e28ab
JH
3153
3154 /* Debug keys are insecure so don't store them persistently */
3155 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3156 return false;
d25e28ab
JH
3157
3158 /* Changed combination key and there's no previous one */
3159 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3160 return false;
d25e28ab
JH
3161
3162 /* Security mode 3 case */
3163 if (!conn)
745c0ce3 3164 return true;
d25e28ab
JH
3165
3166 /* Neither the local nor the remote side had no-bonding as a requirement */
3167 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3168 return true;
d25e28ab
JH
3169
3170 /* Local side had dedicated bonding as requirement */
3171 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3172 return true;
d25e28ab
JH
3173
3174 /* Remote side had dedicated bonding as requirement */
3175 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3176 return true;
d25e28ab
JH
3177
3178 /* If none of the above criteria match, then don't store the key
3179 * persistently */
745c0ce3 3180 return false;
d25e28ab
JH
3181}
3182
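/* Editorial summary of the decision above (not part of the original
 * source), in order of precedence:
 *
 *   key_type < 0x03 (legacy key)              -> store
 *   HCI_LK_DEBUG_COMBINATION                  -> discard
 *   changed combination key, no previous key  -> discard
 *   no conn (security mode 3)                 -> store
 *   both sides asked for some form of bonding -> store
 *   either side required dedicated bonding    -> store
 *   anything else                             -> discard
 */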
e804d25d 3183static u8 ltk_role(u8 type)
98a0b845 3184{
e804d25d
JH
3185 if (type == SMP_LTK)
3186 return HCI_ROLE_MASTER;
98a0b845 3187
e804d25d 3188 return HCI_ROLE_SLAVE;
98a0b845
JH
3189}
3190
fe39c7b2 3191struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
e804d25d 3192 u8 role)
75d262c2 3193{
c9839a11 3194 struct smp_ltk *k;
75d262c2 3195
970d0f1b
JH
3196 rcu_read_lock();
3197 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
fe39c7b2 3198 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3199 continue;
3200
e804d25d 3201 if (ltk_role(k->type) != role)
98a0b845
JH
3202 continue;
3203
970d0f1b 3204 rcu_read_unlock();
c9839a11 3205 return k;
75d262c2 3206 }
970d0f1b 3207 rcu_read_unlock();
75d262c2
VCG
3208
3209 return NULL;
3210}
75d262c2 3211
c9839a11 3212struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3213 u8 addr_type, u8 role)
75d262c2 3214{
c9839a11 3215 struct smp_ltk *k;
75d262c2 3216
970d0f1b
JH
3217 rcu_read_lock();
3218 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
c9839a11 3219 if (addr_type == k->bdaddr_type &&
98a0b845 3220 bacmp(bdaddr, &k->bdaddr) == 0 &&
970d0f1b
JH
3221 ltk_role(k->type) == role) {
3222 rcu_read_unlock();
75d262c2 3223 return k;
970d0f1b
JH
3224 }
3225 }
3226 rcu_read_unlock();
75d262c2
VCG
3227
3228 return NULL;
3229}
75d262c2 3230
970c4e46
JH
3231struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3232{
3233 struct smp_irk *irk;
3234
adae20cb
JH
3235 rcu_read_lock();
3236 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3237 if (!bacmp(&irk->rpa, rpa)) {
3238 rcu_read_unlock();
970c4e46 3239 return irk;
adae20cb 3240 }
970c4e46
JH
3241 }
3242
adae20cb 3243 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 3244 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 3245 bacpy(&irk->rpa, rpa);
adae20cb 3246 rcu_read_unlock();
970c4e46
JH
3247 return irk;
3248 }
3249 }
adae20cb 3250 rcu_read_unlock();
970c4e46
JH
3251
3252 return NULL;
3253}
3254
3255struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3256 u8 addr_type)
3257{
3258 struct smp_irk *irk;
3259
6cfc9988
JH
3260 /* Identity Address must be public or static random */
3261 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3262 return NULL;
3263
adae20cb
JH
3264 rcu_read_lock();
3265 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 3266 if (addr_type == irk->addr_type &&
adae20cb
JH
3267 bacmp(bdaddr, &irk->bdaddr) == 0) {
3268 rcu_read_unlock();
970c4e46 3269 return irk;
adae20cb 3270 }
970c4e46 3271 }
adae20cb 3272 rcu_read_unlock();
970c4e46
JH
3273
3274 return NULL;
3275}
3276
567fa2aa 3277struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3278 bdaddr_t *bdaddr, u8 *val, u8 type,
3279 u8 pin_len, bool *persistent)
55ed8ca1
JH
3280{
3281 struct link_key *key, *old_key;
745c0ce3 3282 u8 old_key_type;
55ed8ca1
JH
3283
3284 old_key = hci_find_link_key(hdev, bdaddr);
3285 if (old_key) {
3286 old_key_type = old_key->type;
3287 key = old_key;
3288 } else {
12adcf3a 3289 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3290 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3291 if (!key)
567fa2aa 3292 return NULL;
55ed8ca1
JH
3293 list_add(&key->list, &hdev->link_keys);
3294 }
3295
6ed93dc6 3296 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3297
d25e28ab
JH
3298 /* Some buggy controller combinations generate a changed
3299 * combination key for legacy pairing even when there's no
3300 * previous key */
3301 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3302 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3303 type = HCI_LK_COMBINATION;
655fe6ec
JH
3304 if (conn)
3305 conn->key_type = type;
3306 }
d25e28ab 3307
55ed8ca1 3308 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3309 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3310 key->pin_len = pin_len;
3311
b6020ba0 3312 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3313 key->type = old_key_type;
4748fed2
JH
3314 else
3315 key->type = type;
3316
7652ff6a
JH
3317 if (persistent)
3318 *persistent = hci_persistent_key(hdev, conn, type,
3319 old_key_type);
4df378a1 3320
567fa2aa 3321 return key;
55ed8ca1
JH
3322}
3323
ca9142b8 3324struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3325 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3326 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3327{
c9839a11 3328 struct smp_ltk *key, *old_key;
e804d25d 3329 u8 role = ltk_role(type);
75d262c2 3330
e804d25d 3331 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3332 if (old_key)
75d262c2 3333 key = old_key;
c9839a11 3334 else {
0a14ab41 3335 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3336 if (!key)
ca9142b8 3337 return NULL;
970d0f1b 3338 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3339 }
3340
75d262c2 3341 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3342 key->bdaddr_type = addr_type;
3343 memcpy(key->val, tk, sizeof(key->val));
3344 key->authenticated = authenticated;
3345 key->ediv = ediv;
fe39c7b2 3346 key->rand = rand;
c9839a11
VCG
3347 key->enc_size = enc_size;
3348 key->type = type;
75d262c2 3349
ca9142b8 3350 return key;
75d262c2
VCG
3351}
3352
ca9142b8
JH
3353struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3354 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3355{
3356 struct smp_irk *irk;
3357
3358 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3359 if (!irk) {
3360 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3361 if (!irk)
ca9142b8 3362 return NULL;
970c4e46
JH
3363
3364 bacpy(&irk->bdaddr, bdaddr);
3365 irk->addr_type = addr_type;
3366
adae20cb 3367 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
3368 }
3369
3370 memcpy(irk->val, val, 16);
3371 bacpy(&irk->rpa, rpa);
3372
ca9142b8 3373 return irk;
970c4e46
JH
3374}
3375
55ed8ca1
JH
3376int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3377{
3378 struct link_key *key;
3379
3380 key = hci_find_link_key(hdev, bdaddr);
3381 if (!key)
3382 return -ENOENT;
3383
6ed93dc6 3384 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3385
3386 list_del(&key->list);
3387 kfree(key);
3388
3389 return 0;
3390}
3391
e0b2b27e 3392int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 3393{
970d0f1b 3394 struct smp_ltk *k;
c51ffa0b 3395 int removed = 0;
b899efaf 3396
970d0f1b 3397 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 3398 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3399 continue;
3400
6ed93dc6 3401 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 3402
970d0f1b
JH
3403 list_del_rcu(&k->list);
3404 kfree_rcu(k, rcu);
c51ffa0b 3405 removed++;
b899efaf
VCG
3406 }
3407
c51ffa0b 3408 return removed ? 0 : -ENOENT;
b899efaf
VCG
3409}
3410
a7ec7338
JH
3411void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3412{
adae20cb 3413 struct smp_irk *k;
a7ec7338 3414
adae20cb 3415 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3416 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3417 continue;
3418
3419 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3420
adae20cb
JH
3421 list_del_rcu(&k->list);
3422 kfree_rcu(k, rcu);
a7ec7338
JH
3423 }
3424}
3425
6bd32326 3426/* HCI command timer function */
65cc2b49 3427static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3428{
65cc2b49
MH
3429 struct hci_dev *hdev = container_of(work, struct hci_dev,
3430 cmd_timer.work);
6bd32326 3431
bda4f23a
AE
3432 if (hdev->sent_cmd) {
3433 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3434 u16 opcode = __le16_to_cpu(sent->opcode);
3435
3436 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3437 } else {
3438 BT_ERR("%s command tx timeout", hdev->name);
3439 }
3440
6bd32326 3441 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3442 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3443}
3444
2763eda6 3445struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3446 bdaddr_t *bdaddr)
2763eda6
SJ
3447{
3448 struct oob_data *data;
3449
3450 list_for_each_entry(data, &hdev->remote_oob_data, list)
3451 if (bacmp(bdaddr, &data->bdaddr) == 0)
3452 return data;
3453
3454 return NULL;
3455}
3456
3457int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3458{
3459 struct oob_data *data;
3460
3461 data = hci_find_remote_oob_data(hdev, bdaddr);
3462 if (!data)
3463 return -ENOENT;
3464
6ed93dc6 3465 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3466
3467 list_del(&data->list);
3468 kfree(data);
3469
3470 return 0;
3471}
3472
35f7498a 3473void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3474{
3475 struct oob_data *data, *n;
3476
3477 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3478 list_del(&data->list);
3479 kfree(data);
3480 }
2763eda6
SJ
3481}
3482
0798872e 3483int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
38da1703 3484 u8 *hash, u8 *rand)
2763eda6
SJ
3485{
3486 struct oob_data *data;
3487
3488 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3489 if (!data) {
0a14ab41 3490 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3491 if (!data)
3492 return -ENOMEM;
3493
3494 bacpy(&data->bdaddr, bdaddr);
3495 list_add(&data->list, &hdev->remote_oob_data);
3496 }
3497
519ca9d0 3498 memcpy(data->hash192, hash, sizeof(data->hash192));
38da1703 3499 memcpy(data->rand192, rand, sizeof(data->rand192));
2763eda6 3500
0798872e 3501 memset(data->hash256, 0, sizeof(data->hash256));
38da1703 3502 memset(data->rand256, 0, sizeof(data->rand256));
0798872e
MH
3503
3504 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3505
3506 return 0;
3507}
3508
3509int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
38da1703
JH
3510 u8 *hash192, u8 *rand192,
3511 u8 *hash256, u8 *rand256)
0798872e
MH
3512{
3513 struct oob_data *data;
3514
3515 data = hci_find_remote_oob_data(hdev, bdaddr);
3516 if (!data) {
0a14ab41 3517 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3518 if (!data)
3519 return -ENOMEM;
3520
3521 bacpy(&data->bdaddr, bdaddr);
3522 list_add(&data->list, &hdev->remote_oob_data);
3523 }
3524
3525 memcpy(data->hash192, hash192, sizeof(data->hash192));
38da1703 3526 memcpy(data->rand192, rand192, sizeof(data->rand192));
0798872e
MH
3527
3528 memcpy(data->hash256, hash256, sizeof(data->hash256));
38da1703 3529 memcpy(data->rand256, rand256, sizeof(data->rand256));
0798872e 3530
6ed93dc6 3531 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3532
3533 return 0;
3534}
3535
dcc36c16 3536struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3537 bdaddr_t *bdaddr, u8 type)
b2a66aad 3538{
8035ded4 3539 struct bdaddr_list *b;
b2a66aad 3540
dcc36c16 3541 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3542 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3543 return b;
b9ee0a78 3544 }
b2a66aad
AJ
3545
3546 return NULL;
3547}
3548
dcc36c16 3549void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3550{
3551 struct list_head *p, *n;
3552
dcc36c16 3553 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3554 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3555
3556 list_del(p);
3557 kfree(b);
3558 }
b2a66aad
AJ
3559}
3560
dcc36c16 3561int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3562{
3563 struct bdaddr_list *entry;
b2a66aad 3564
b9ee0a78 3565 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3566 return -EBADF;
3567
dcc36c16 3568 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3569 return -EEXIST;
b2a66aad 3570
27f70f3e 3571 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3572 if (!entry)
3573 return -ENOMEM;
b2a66aad
AJ
3574
3575 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3576 entry->bdaddr_type = type;
b2a66aad 3577
dcc36c16 3578 list_add(&entry->list, list);
b2a66aad 3579
2a8357f2 3580 return 0;
b2a66aad
AJ
3581}
3582
dcc36c16 3583int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3584{
3585 struct bdaddr_list *entry;
b2a66aad 3586
35f7498a 3587 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3588 hci_bdaddr_list_clear(list);
35f7498a
JH
3589 return 0;
3590 }
b2a66aad 3591
dcc36c16 3592 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3593 if (!entry)
3594 return -ENOENT;
3595
3596 list_del(&entry->list);
3597 kfree(entry);
3598
3599 return 0;
3600}
3601
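/* Minimal usage sketch (editorial, hypothetical caller, not from this
 * file): these generic helpers back several of the hdev address lists,
 * for example the blacklist populated from the management interface:
 *
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		;	(address was already blocked)
 *	...
 *	hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 */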
15819a70
AG
3602/* This function requires the caller holds hdev->lock */
3603struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3604 bdaddr_t *addr, u8 addr_type)
3605{
3606 struct hci_conn_params *params;
3607
738f6185
JH
3608 /* The conn params list only contains identity addresses */
3609 if (!hci_is_identity_address(addr, addr_type))
3610 return NULL;
3611
15819a70
AG
3612 list_for_each_entry(params, &hdev->le_conn_params, list) {
3613 if (bacmp(&params->addr, addr) == 0 &&
3614 params->addr_type == addr_type) {
3615 return params;
3616 }
3617 }
3618
3619 return NULL;
3620}
3621
cef952ce
AG
3622static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3623{
3624 struct hci_conn *conn;
3625
3626 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3627 if (!conn)
3628 return false;
3629
3630 if (conn->dst_type != type)
3631 return false;
3632
3633 if (conn->state != BT_CONNECTED)
3634 return false;
3635
3636 return true;
3637}
3638
4b10966f 3639/* This function requires the caller holds hdev->lock */
501f8827
JH
3640struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3641 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3642{
912b42ef 3643 struct hci_conn_params *param;
a9b0a04c 3644
738f6185
JH
3645 /* The list only contains identity addresses */
3646 if (!hci_is_identity_address(addr, addr_type))
3647 return NULL;
a9b0a04c 3648
501f8827 3649 list_for_each_entry(param, list, action) {
912b42ef
JH
3650 if (bacmp(&param->addr, addr) == 0 &&
3651 param->addr_type == addr_type)
3652 return param;
4b10966f
MH
3653 }
3654
3655 return NULL;
a9b0a04c
AG
3656}
3657
15819a70 3658/* This function requires the caller holds hdev->lock */
51d167c0
MH
3659struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3660 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3661{
3662 struct hci_conn_params *params;
3663
c46245b3 3664 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3665 return NULL;
a9b0a04c 3666
15819a70 3667 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3668 if (params)
51d167c0 3669 return params;
15819a70
AG
3670
3671 params = kzalloc(sizeof(*params), GFP_KERNEL);
3672 if (!params) {
3673 BT_ERR("Out of memory");
51d167c0 3674 return NULL;
15819a70
AG
3675 }
3676
3677 bacpy(&params->addr, addr);
3678 params->addr_type = addr_type;
cef952ce
AG
3679
3680 list_add(&params->list, &hdev->le_conn_params);
93450c75 3681 INIT_LIST_HEAD(&params->action);
cef952ce 3682
bf5b3c8b
MH
3683 params->conn_min_interval = hdev->le_conn_min_interval;
3684 params->conn_max_interval = hdev->le_conn_max_interval;
3685 params->conn_latency = hdev->le_conn_latency;
3686 params->supervision_timeout = hdev->le_supv_timeout;
3687 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3688
3689 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3690
51d167c0 3691 return params;
bf5b3c8b
MH
3692}
3693
3694/* This function requires the caller holds hdev->lock */
3695int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3696 u8 auto_connect)
15819a70
AG
3697{
3698 struct hci_conn_params *params;
3699
8c87aae1
MH
3700 params = hci_conn_params_add(hdev, addr, addr_type);
3701 if (!params)
3702 return -EIO;
cef952ce 3703
42ce26de
JH
3704 if (params->auto_connect == auto_connect)
3705 return 0;
3706
95305baa 3707 list_del_init(&params->action);
15819a70 3708
cef952ce
AG
3709 switch (auto_connect) {
3710 case HCI_AUTO_CONN_DISABLED:
3711 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3712 hci_update_background_scan(hdev);
cef952ce 3713 break;
851efca8 3714 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3715 list_add(&params->action, &hdev->pend_le_reports);
3716 hci_update_background_scan(hdev);
cef952ce 3717 break;
4b9e7e75 3718 case HCI_AUTO_CONN_DIRECT:
cef952ce 3719 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3720 if (!is_connected(hdev, addr, addr_type)) {
3721 list_add(&params->action, &hdev->pend_le_conns);
3722 hci_update_background_scan(hdev);
3723 }
cef952ce
AG
3724 break;
3725 }
15819a70 3726
851efca8
JH
3727 params->auto_connect = auto_connect;
3728
d06b50ce
MH
3729 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3730 auto_connect);
a9b0a04c
AG
3731
3732 return 0;
15819a70
AG
3733}
3734
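/* Usage sketch (not part of this file): how a caller such as the mgmt
 * Add Device handler might use hci_conn_params_set() above to make the
 * kernel reconnect a known LE device autonomously. The function name is
 * illustrative; the locking follows the "caller holds hdev->lock" rule
 * stated above.
 */
static int example_add_auto_connect(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);

	/* HCI_AUTO_CONN_ALWAYS puts the device on pend_le_conns and
	 * kicks the background scan, so advertising reports from it
	 * trigger a connection attempt.
	 */
	err = hci_conn_params_set(hdev, bdaddr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);

	hci_dev_unlock(hdev);

	return err;
}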
f6c63249 3735static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3736{
f8aaf9b6 3737 if (params->conn) {
f161dd41 3738 hci_conn_drop(params->conn);
f8aaf9b6
JH
3739 hci_conn_put(params->conn);
3740 }
f161dd41 3741
95305baa 3742 list_del(&params->action);
15819a70
AG
3743 list_del(&params->list);
3744 kfree(params);
f6c63249
JH
3745}
3746
3747/* This function requires the caller holds hdev->lock */
3748void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3749{
3750 struct hci_conn_params *params;
3751
3752 params = hci_conn_params_lookup(hdev, addr, addr_type);
3753 if (!params)
3754 return;
3755
3756 hci_conn_params_free(params);
15819a70 3757
95305baa
JH
3758 hci_update_background_scan(hdev);
3759
15819a70
AG
3760 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3761}
3762
3763/* This function requires the caller holds hdev->lock */
55af49a8 3764void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3765{
3766 struct hci_conn_params *params, *tmp;
3767
3768 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3769 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3770 continue;
15819a70
AG
3771 list_del(&params->list);
3772 kfree(params);
3773 }
3774
55af49a8 3775 BT_DBG("All disabled LE connection parameters were removed");
77a77a30
AG
3776}
3777
3778/* This function requires the caller holds hdev->lock */
373110c5 3779void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3780{
15819a70 3781 struct hci_conn_params *params, *tmp;
77a77a30 3782
f6c63249
JH
3783 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3784 hci_conn_params_free(params);
77a77a30 3785
a4790dbd 3786 hci_update_background_scan(hdev);
77a77a30 3787
15819a70 3788 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3789}
3790
4c87eaab 3791static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3792{
4c87eaab
AG
3793 if (status) {
3794 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3795
4c87eaab
AG
3796 hci_dev_lock(hdev);
3797 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3798 hci_dev_unlock(hdev);
3799 return;
3800 }
7ba8b4be
AG
3801}
3802
4c87eaab 3803static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3804{
4c87eaab
AG
3805 /* General inquiry access code (GIAC) */
3806 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3807 struct hci_request req;
3808 struct hci_cp_inquiry cp;
7ba8b4be
AG
3809 int err;
3810
4c87eaab
AG
3811 if (status) {
3812 BT_ERR("Failed to disable LE scanning: status %d", status);
3813 return;
3814 }
7ba8b4be 3815
4c87eaab
AG
3816 switch (hdev->discovery.type) {
3817 case DISCOV_TYPE_LE:
3818 hci_dev_lock(hdev);
3819 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3820 hci_dev_unlock(hdev);
3821 break;
7ba8b4be 3822
4c87eaab
AG
3823 case DISCOV_TYPE_INTERLEAVED:
3824 hci_req_init(&req, hdev);
7ba8b4be 3825
4c87eaab
AG
3826 memset(&cp, 0, sizeof(cp));
3827 memcpy(&cp.lap, lap, sizeof(cp.lap));
3828 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3829 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3830
4c87eaab 3831 hci_dev_lock(hdev);
7dbfac1d 3832
4c87eaab 3833 hci_inquiry_cache_flush(hdev);
7dbfac1d 3834
4c87eaab
AG
3835 err = hci_req_run(&req, inquiry_complete);
3836 if (err) {
3837 BT_ERR("Inquiry request failed: err %d", err);
3838 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3839 }
7dbfac1d 3840
4c87eaab
AG
3841 hci_dev_unlock(hdev);
3842 break;
7dbfac1d 3843 }
7dbfac1d
AG
3844}
3845
7ba8b4be
AG
3846static void le_scan_disable_work(struct work_struct *work)
3847{
3848 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3849 le_scan_disable.work);
4c87eaab
AG
3850 struct hci_request req;
3851 int err;
7ba8b4be
AG
3852
3853 BT_DBG("%s", hdev->name);
3854
4c87eaab 3855 hci_req_init(&req, hdev);
28b75a89 3856
b1efcc28 3857 hci_req_add_le_scan_disable(&req);
28b75a89 3858
4c87eaab
AG
3859 err = hci_req_run(&req, le_scan_disable_work_complete);
3860 if (err)
3861 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3862}
3863
8d97250e
JH
3864static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3865{
3866 struct hci_dev *hdev = req->hdev;
3867
3868 /* If we're advertising or initiating an LE connection we can't
3869 * go ahead and change the random address at this time. This is
3870 * because the eventual initiator address used for the
3871 * subsequently created connection will be undefined (some
3872 * controllers use the new address and others the one we had
3873 * when the operation started).
3874 *
3875 * In this kind of scenario skip the update and let the random
3876 * address be updated at the next cycle.
3877 */
5ce194c4 3878 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3879 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3880 BT_DBG("Deferring random address update");
9a783a13 3881 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3882 return;
3883 }
3884
3885 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3886}
3887
94b1fc92
MH
3888int hci_update_random_address(struct hci_request *req, bool require_privacy,
3889 u8 *own_addr_type)
ebd3a747
JH
3890{
3891 struct hci_dev *hdev = req->hdev;
3892 int err;
3893
3894 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3895 * the current RPA has expired or there is something other than
3896 * the current RPA in use, then generate a new one.
ebd3a747
JH
3897 */
3898 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3899 int to;
3900
3901 *own_addr_type = ADDR_LE_DEV_RANDOM;
3902
3903 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3904 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3905 return 0;
3906
defce9e8 3907 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3908 if (err < 0) {
3909 BT_ERR("%s failed to generate new RPA", hdev->name);
3910 return err;
3911 }
3912
8d97250e 3913 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3914
3915 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3916 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3917
3918 return 0;
94b1fc92
MH
3919 }
3920
3921 /* In case of required privacy without resolvable private address,
3922 * use an unresolvable private address. This is useful for active
3923 * scanning and non-connectable advertising.
3924 */
3925 if (require_privacy) {
3926 bdaddr_t urpa;
3927
3928 get_random_bytes(&urpa, 6);
3929 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3930
3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3932 set_random_addr(req, &urpa);
94b1fc92 3933 return 0;
ebd3a747
JH
3934 }
3935
3936 /* If forcing static address is in use or there is no public
3937 * address, use the static address as the random address (but skip
3938 * the HCI command if the current random address is already the
3939 * static one).
3940 */
111902f7 3941 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3942 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3943 *own_addr_type = ADDR_LE_DEV_RANDOM;
3944 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3945 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3946 &hdev->static_addr);
3947 return 0;
3948 }
3949
3950 /* Neither privacy nor static address is being used so use a
3951 * public address.
3952 */
3953 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3954
3955 return 0;
3956}
3957
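/* For reference, the Core spec (Vol 6, Part B, Sec 1.3) distinguishes
 * the random address sub-types by the two most significant bits of the
 * most significant byte:
 *
 *   11xxxxxx  static random address
 *   01xxxxxx  resolvable private address (RPA)
 *   00xxxxxx  non-resolvable private address (NRPA)
 *
 * which is why the code above clears the top two bits of urpa.b[5].
 * A hedged helper sketch (not part of this file):
 */
static inline bool example_is_nrpa(const bdaddr_t *addr)
{
	/* NRPA: the two most significant address bits are 00 */
	return (addr->b[5] & 0xc0) == 0x00;
}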
a1f4c318
JH
3958/* Copy the Identity Address of the controller.
3959 *
3960 * If the controller has a public BD_ADDR, then by default use that one.
3961 * If this is a LE only controller without a public address, default to
3962 * the static random address.
3963 *
3964 * For debugging purposes it is possible to force controllers with a
3965 * public address to use the static random address instead.
3966 */
3967void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3968 u8 *bdaddr_type)
3969{
111902f7 3970 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3971 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3972 bacpy(bdaddr, &hdev->static_addr);
3973 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3974 } else {
3975 bacpy(bdaddr, &hdev->bdaddr);
3976 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3977 }
3978}
3979
9be0dab7
DH
3980/* Alloc HCI device */
3981struct hci_dev *hci_alloc_dev(void)
3982{
3983 struct hci_dev *hdev;
3984
27f70f3e 3985 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3986 if (!hdev)
3987 return NULL;
3988
b1b813d4
DH
3989 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3990 hdev->esco_type = (ESCO_HV1);
3991 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3992 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3993 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3994 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3995 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3996 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3997
b1b813d4
DH
3998 hdev->sniff_max_interval = 800;
3999 hdev->sniff_min_interval = 80;
4000
3f959d46 4001 hdev->le_adv_channel_map = 0x07;
628531c9
GL
4002 hdev->le_adv_min_interval = 0x0800;
4003 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
4004 hdev->le_scan_interval = 0x0060;
4005 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
4006 hdev->le_conn_min_interval = 0x0028;
4007 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
4008 hdev->le_conn_latency = 0x0000;
4009 hdev->le_supv_timeout = 0x002a;
bef64738 4010
d6bfd59c 4011 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 4012 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
4013 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4014 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4015
b1b813d4
DH
4016 mutex_init(&hdev->lock);
4017 mutex_init(&hdev->req_lock);
4018
4019 INIT_LIST_HEAD(&hdev->mgmt_pending);
4020 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4021 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4022 INIT_LIST_HEAD(&hdev->uuids);
4023 INIT_LIST_HEAD(&hdev->link_keys);
4024 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4025 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4026 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4027 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4028 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4029 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4030 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4031 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4032
4033 INIT_WORK(&hdev->rx_work, hci_rx_work);
4034 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4035 INIT_WORK(&hdev->tx_work, hci_tx_work);
4036 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4037
b1b813d4
DH
4038 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4039 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4040 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4041
b1b813d4
DH
4042 skb_queue_head_init(&hdev->rx_q);
4043 skb_queue_head_init(&hdev->cmd_q);
4044 skb_queue_head_init(&hdev->raw_q);
4045
4046 init_waitqueue_head(&hdev->req_wait_q);
4047
65cc2b49 4048 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4049
b1b813d4
DH
4050 hci_init_sysfs(hdev);
4051 discovery_init(hdev);
9be0dab7
DH
4052
4053 return hdev;
4054}
4055EXPORT_SYMBOL(hci_alloc_dev);
4056
4057/* Free HCI device */
4058void hci_free_dev(struct hci_dev *hdev)
4059{
9be0dab7
DH
4060 /* will free via device release */
4061 put_device(&hdev->dev);
4062}
4063EXPORT_SYMBOL(hci_free_dev);
4064
1da177e4
LT
4065/* Register HCI device */
4066int hci_register_dev(struct hci_dev *hdev)
4067{
b1b813d4 4068 int id, error;
1da177e4 4069
74292d5a 4070 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4071 return -EINVAL;
4072
08add513
MM
4073 /* Do not allow HCI_AMP devices to register at index 0,
4074 * so the index can be used as the AMP controller ID.
4075 */
3df92b31
SL
4076 switch (hdev->dev_type) {
4077 case HCI_BREDR:
4078 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4079 break;
4080 case HCI_AMP:
4081 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4082 break;
4083 default:
4084 return -EINVAL;
1da177e4 4085 }
8e87d142 4086
3df92b31
SL
4087 if (id < 0)
4088 return id;
4089
1da177e4
LT
4090 sprintf(hdev->name, "hci%d", id);
4091 hdev->id = id;
2d8b3a11
AE
4092
4093 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4094
d8537548
KC
4095 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4096 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4097 if (!hdev->workqueue) {
4098 error = -ENOMEM;
4099 goto err;
4100 }
f48fd9c8 4101
d8537548
KC
4102 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4103 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4104 if (!hdev->req_workqueue) {
4105 destroy_workqueue(hdev->workqueue);
4106 error = -ENOMEM;
4107 goto err;
4108 }
4109
0153e2ec
MH
4110 if (!IS_ERR_OR_NULL(bt_debugfs))
4111 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4112
bdc3e0f1
MH
4113 dev_set_name(&hdev->dev, "%s", hdev->name);
4114
4115 error = device_add(&hdev->dev);
33ca954d 4116 if (error < 0)
54506918 4117 goto err_wqueue;
1da177e4 4118
611b30f7 4119 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4120 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4121 hdev);
611b30f7
MH
4122 if (hdev->rfkill) {
4123 if (rfkill_register(hdev->rfkill) < 0) {
4124 rfkill_destroy(hdev->rfkill);
4125 hdev->rfkill = NULL;
4126 }
4127 }
4128
5e130367
JH
4129 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4130 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4131
a8b2d5c2 4132 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4133 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4134
01cd3404 4135 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4136 /* Assume BR/EDR support until proven otherwise (such as
4137 * through reading supported features during init).
4138 */
4139 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4140 }
ce2be9ac 4141
fcee3377
GP
4142 write_lock(&hci_dev_list_lock);
4143 list_add(&hdev->list, &hci_dev_list);
4144 write_unlock(&hci_dev_list_lock);
4145
4a964404
MH
4146 /* Devices that are marked for raw-only usage are unconfigured
4147 * and should not be included in normal operation.
fee746b0
MH
4148 */
4149 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4150 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4151
1da177e4 4152 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4153 hci_dev_hold(hdev);
1da177e4 4154
19202573 4155 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4156
1da177e4 4157 return id;
f48fd9c8 4158
33ca954d
DH
4159err_wqueue:
4160 destroy_workqueue(hdev->workqueue);
6ead1bbc 4161 destroy_workqueue(hdev->req_workqueue);
33ca954d 4162err:
3df92b31 4163 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4164
33ca954d 4165 return error;
1da177e4
LT
4166}
4167EXPORT_SYMBOL(hci_register_dev);
4168
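/* Driver-side sketch of the registration API above, loosely modelled on
 * a virtual transport (cf. drivers/bluetooth/hci_vhci.c). All names are
 * illustrative; only hci_alloc_dev(), hci_register_dev() and the
 * mandatory open/close/send callbacks are real requirements.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static struct hci_dev *example_register(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return NULL;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	/* hci_register_dev() returns the allocated index on success */
	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}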
4169/* Unregister HCI device */
59735631 4170void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4171{
3df92b31 4172 int i, id;
ef222013 4173
c13854ce 4174 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4175
94324962
JH
4176 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4177
3df92b31
SL
4178 id = hdev->id;
4179
f20d09d5 4180 write_lock(&hci_dev_list_lock);
1da177e4 4181 list_del(&hdev->list);
f20d09d5 4182 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4183
4184 hci_dev_do_close(hdev);
4185
cd4c5391 4186 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4187 kfree_skb(hdev->reassembly[i]);
4188
b9b5ef18
GP
4189 cancel_work_sync(&hdev->power_on);
4190
ab81cbf9 4191 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4192 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4193 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4194 hci_dev_lock(hdev);
744cf19e 4195 mgmt_index_removed(hdev);
09fd0de5 4196 hci_dev_unlock(hdev);
56e5cb86 4197 }
ab81cbf9 4198
2e58ef3e
JH
4199 /* mgmt_index_removed should take care of emptying the
4200 * pending list */
4201 BUG_ON(!list_empty(&hdev->mgmt_pending));
4202
1da177e4
LT
4203 hci_notify(hdev, HCI_DEV_UNREG);
4204
611b30f7
MH
4205 if (hdev->rfkill) {
4206 rfkill_unregister(hdev->rfkill);
4207 rfkill_destroy(hdev->rfkill);
4208 }
4209
711eafe3 4210 smp_unregister(hdev);
99780a7b 4211
bdc3e0f1 4212 device_del(&hdev->dev);
147e2d59 4213
0153e2ec
MH
4214 debugfs_remove_recursive(hdev->debugfs);
4215
f48fd9c8 4216 destroy_workqueue(hdev->workqueue);
6ead1bbc 4217 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4218
09fd0de5 4219 hci_dev_lock(hdev);
dcc36c16 4220 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4221 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4222 hci_uuids_clear(hdev);
55ed8ca1 4223 hci_link_keys_clear(hdev);
b899efaf 4224 hci_smp_ltks_clear(hdev);
970c4e46 4225 hci_smp_irks_clear(hdev);
2763eda6 4226 hci_remote_oob_data_clear(hdev);
dcc36c16 4227 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4228 hci_conn_params_clear_all(hdev);
09fd0de5 4229 hci_dev_unlock(hdev);
e2e0cacb 4230
dc946bd8 4231 hci_dev_put(hdev);
3df92b31
SL
4232
4233 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4234}
4235EXPORT_SYMBOL(hci_unregister_dev);
4236
4237/* Suspend HCI device */
4238int hci_suspend_dev(struct hci_dev *hdev)
4239{
4240 hci_notify(hdev, HCI_DEV_SUSPEND);
4241 return 0;
4242}
4243EXPORT_SYMBOL(hci_suspend_dev);
4244
4245/* Resume HCI device */
4246int hci_resume_dev(struct hci_dev *hdev)
4247{
4248 hci_notify(hdev, HCI_DEV_RESUME);
4249 return 0;
4250}
4251EXPORT_SYMBOL(hci_resume_dev);
4252
75e0569f
MH
4253/* Reset HCI device */
4254int hci_reset_dev(struct hci_dev *hdev)
4255{
4256 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4257 struct sk_buff *skb;
4258
4259 skb = bt_skb_alloc(3, GFP_ATOMIC);
4260 if (!skb)
4261 return -ENOMEM;
4262
4263 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4264 memcpy(skb_put(skb, 3), hw_err, 3);
4265
4266 /* Send Hardware Error to upper stack */
4267 return hci_recv_frame(hdev, skb);
4268}
4269EXPORT_SYMBOL(hci_reset_dev);
4270
76bca880 4271/* Receive frame from HCI drivers */
e1a26170 4272int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4273{
76bca880 4274 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4275 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4276 kfree_skb(skb);
4277 return -ENXIO;
4278 }
4279
d82603c6 4280 /* Incoming skb */
76bca880
MH
4281 bt_cb(skb)->incoming = 1;
4282
4283 /* Time stamp */
4284 __net_timestamp(skb);
4285
76bca880 4286 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4287 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4288
76bca880
MH
4289 return 0;
4290}
4291EXPORT_SYMBOL(hci_recv_frame);
4292
33e882a5 4293static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4294 int count, __u8 index)
33e882a5
SS
4295{
4296 int len = 0;
4297 int hlen = 0;
4298 int remain = count;
4299 struct sk_buff *skb;
4300 struct bt_skb_cb *scb;
4301
4302 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4303 index >= NUM_REASSEMBLY)
33e882a5
SS
4304 return -EILSEQ;
4305
4306 skb = hdev->reassembly[index];
4307
4308 if (!skb) {
4309 switch (type) {
4310 case HCI_ACLDATA_PKT:
4311 len = HCI_MAX_FRAME_SIZE;
4312 hlen = HCI_ACL_HDR_SIZE;
4313 break;
4314 case HCI_EVENT_PKT:
4315 len = HCI_MAX_EVENT_SIZE;
4316 hlen = HCI_EVENT_HDR_SIZE;
4317 break;
4318 case HCI_SCODATA_PKT:
4319 len = HCI_MAX_SCO_SIZE;
4320 hlen = HCI_SCO_HDR_SIZE;
4321 break;
4322 }
4323
1e429f38 4324 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4325 if (!skb)
4326 return -ENOMEM;
4327
4328 scb = (void *) skb->cb;
4329 scb->expect = hlen;
4330 scb->pkt_type = type;
4331
33e882a5
SS
4332 hdev->reassembly[index] = skb;
4333 }
4334
4335 while (count) {
4336 scb = (void *) skb->cb;
89bb46d0 4337 len = min_t(uint, scb->expect, count);
33e882a5
SS
4338
4339 memcpy(skb_put(skb, len), data, len);
4340
4341 count -= len;
4342 data += len;
4343 scb->expect -= len;
4344 remain = count;
4345
4346 switch (type) {
4347 case HCI_EVENT_PKT:
4348 if (skb->len == HCI_EVENT_HDR_SIZE) {
4349 struct hci_event_hdr *h = hci_event_hdr(skb);
4350 scb->expect = h->plen;
4351
4352 if (skb_tailroom(skb) < scb->expect) {
4353 kfree_skb(skb);
4354 hdev->reassembly[index] = NULL;
4355 return -ENOMEM;
4356 }
4357 }
4358 break;
4359
4360 case HCI_ACLDATA_PKT:
4361 if (skb->len == HCI_ACL_HDR_SIZE) {
4362 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4363 scb->expect = __le16_to_cpu(h->dlen);
4364
4365 if (skb_tailroom(skb) < scb->expect) {
4366 kfree_skb(skb);
4367 hdev->reassembly[index] = NULL;
4368 return -ENOMEM;
4369 }
4370 }
4371 break;
4372
4373 case HCI_SCODATA_PKT:
4374 if (skb->len == HCI_SCO_HDR_SIZE) {
4375 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4376 scb->expect = h->dlen;
4377
4378 if (skb_tailroom(skb) < scb->expect) {
4379 kfree_skb(skb);
4380 hdev->reassembly[index] = NULL;
4381 return -ENOMEM;
4382 }
4383 }
4384 break;
4385 }
4386
4387 if (scb->expect == 0) {
4388 /* Complete frame */
4389
4390 bt_cb(skb)->pkt_type = type;
e1a26170 4391 hci_recv_frame(hdev, skb);
33e882a5
SS
4392
4393 hdev->reassembly[index] = NULL;
4394 return remain;
4395 }
4396 }
4397
4398 return remain;
4399}
4400
99811510
SS
4401#define STREAM_REASSEMBLY 0
4402
4403int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4404{
4405 int type;
4406 int rem = 0;
4407
da5f6c37 4408 while (count) {
99811510
SS
4409 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4410
4411 if (!skb) {
4412 struct { char type; } *pkt;
4413
4414 /* Start of the frame */
4415 pkt = data;
4416 type = pkt->type;
4417
4418 data++;
4419 count--;
4420 } else
4421 type = bt_cb(skb)->pkt_type;
4422
1e429f38 4423 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4424 STREAM_REASSEMBLY);
99811510
SS
4425 if (rem < 0)
4426 return rem;
4427
4428 data += (count - rem);
4429 count = rem;
f81c6224 4430 }
99811510
SS
4431
4432 return rem;
4433}
4434EXPORT_SYMBOL(hci_recv_stream_fragment);
4435
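/* Transport-side sketch (illustrative, not part of this file): a UART
 * style driver that receives an unframed H:4 byte stream can feed it
 * straight into the reassembler above; complete frames end up in
 * hci_recv_frame() automatically.
 */
static void example_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
	int err;

	err = hci_recv_stream_fragment(hdev, buf, len);
	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}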
1da177e4
LT
4436/* ---- Interface to upper protocols ---- */
4437
1da177e4
LT
4438int hci_register_cb(struct hci_cb *cb)
4439{
4440 BT_DBG("%p name %s", cb, cb->name);
4441
f20d09d5 4442 write_lock(&hci_cb_list_lock);
1da177e4 4443 list_add(&cb->list, &hci_cb_list);
f20d09d5 4444 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4445
4446 return 0;
4447}
4448EXPORT_SYMBOL(hci_register_cb);
4449
4450int hci_unregister_cb(struct hci_cb *cb)
4451{
4452 BT_DBG("%p name %s", cb, cb->name);
4453
f20d09d5 4454 write_lock(&hci_cb_list_lock);
1da177e4 4455 list_del(&cb->list);
f20d09d5 4456 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4457
4458 return 0;
4459}
4460EXPORT_SYMBOL(hci_unregister_cb);
4461
51086991 4462static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4463{
cdc52faa
MH
4464 int err;
4465
0d48d939 4466 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4467
cd82e61c
MH
4468 /* Time stamp */
4469 __net_timestamp(skb);
1da177e4 4470
cd82e61c
MH
4471 /* Send copy to monitor */
4472 hci_send_to_monitor(hdev, skb);
4473
4474 if (atomic_read(&hdev->promisc)) {
4475 /* Send copy to the sockets */
470fe1b5 4476 hci_send_to_sock(hdev, skb);
1da177e4
LT
4477 }
4478
4479 /* Get rid of skb owner, prior to sending to the driver. */
4480 skb_orphan(skb);
4481
cdc52faa
MH
4482 err = hdev->send(hdev, skb);
4483 if (err < 0) {
4484 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4485 kfree_skb(skb);
4486 }
1da177e4
LT
4487}
4488
3119ae95
JH
4489void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4490{
4491 skb_queue_head_init(&req->cmd_q);
4492 req->hdev = hdev;
5d73e034 4493 req->err = 0;
3119ae95
JH
4494}
4495
4496int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4497{
4498 struct hci_dev *hdev = req->hdev;
4499 struct sk_buff *skb;
4500 unsigned long flags;
4501
4502 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4503
49c922bb 4504 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4505 * commands queued on the HCI request queue.
4506 */
4507 if (req->err) {
4508 skb_queue_purge(&req->cmd_q);
4509 return req->err;
4510 }
4511
3119ae95
JH
4512 /* Do not allow empty requests */
4513 if (skb_queue_empty(&req->cmd_q))
382b0c39 4514 return -ENODATA;
3119ae95
JH
4515
4516 skb = skb_peek_tail(&req->cmd_q);
4517 bt_cb(skb)->req.complete = complete;
4518
4519 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4520 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4521 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4522
4523 queue_work(hdev->workqueue, &hdev->cmd_work);
4524
4525 return 0;
4526}
4527
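/* Usage sketch of the request machinery above: queue two commands as one
 * request and get a single completion callback once the last command
 * completes. The opcode and parameter choice is illustrative only.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("%s example request failed: 0x%2.2x",
		       hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__le16 policy = cpu_to_le16(0x0005);

	hci_req_init(&req, hdev);

	/* Both commands are spliced onto hdev->cmd_q as one unit */
	hci_req_add(&req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(policy),
		    &policy);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}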
899de765
MH
4528bool hci_req_pending(struct hci_dev *hdev)
4529{
4530 return (hdev->req_status == HCI_REQ_PEND);
4531}
4532
1ca3a9d0 4533static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4534 u32 plen, const void *param)
1da177e4
LT
4535{
4536 int len = HCI_COMMAND_HDR_SIZE + plen;
4537 struct hci_command_hdr *hdr;
4538 struct sk_buff *skb;
4539
1da177e4 4540 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4541 if (!skb)
4542 return NULL;
1da177e4
LT
4543
4544 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4545 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4546 hdr->plen = plen;
4547
4548 if (plen)
4549 memcpy(skb_put(skb, plen), param, plen);
4550
4551 BT_DBG("skb len %d", skb->len);
4552
0d48d939 4553 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4554 bt_cb(skb)->opcode = opcode;
c78ae283 4555
1ca3a9d0
JH
4556 return skb;
4557}
4558
4559/* Send HCI command */
07dc93dd
JH
4560int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4561 const void *param)
1ca3a9d0
JH
4562{
4563 struct sk_buff *skb;
4564
4565 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4566
4567 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4568 if (!skb) {
4569 BT_ERR("%s no memory for command", hdev->name);
4570 return -ENOMEM;
4571 }
4572
49c922bb 4573 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4574 * single-command requests.
4575 */
4576 bt_cb(skb)->req.start = true;
4577
1da177e4 4578 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4579 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4580
4581 return 0;
4582}
1da177e4 4583
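/* Usage sketch for hci_send_cmd() above: a single stand-alone command,
 * here an HCI Reset with no parameters (illustrative wrapper name).
 */
static inline int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}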
71c76a17 4584/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4585void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4586 const void *param, u8 event)
71c76a17
JH
4587{
4588 struct hci_dev *hdev = req->hdev;
4589 struct sk_buff *skb;
4590
4591 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4592
49c922bb 4593 /* If an error occurred during request building, there is no point in
34739c1e
AG
4594 * queueing the HCI command. We can simply return.
4595 */
4596 if (req->err)
4597 return;
4598
71c76a17
JH
4599 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4600 if (!skb) {
5d73e034
AG
4601 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4602 hdev->name, opcode);
4603 req->err = -ENOMEM;
e348fe6b 4604 return;
71c76a17
JH
4605 }
4606
4607 if (skb_queue_empty(&req->cmd_q))
4608 bt_cb(skb)->req.start = true;
4609
02350a72
JH
4610 bt_cb(skb)->req.event = event;
4611
71c76a17 4612 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4613}
4614
07dc93dd
JH
4615void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4616 const void *param)
02350a72
JH
4617{
4618 hci_req_add_ev(req, opcode, plen, param, 0);
4619}
4620
1da177e4 4621/* Get data from the previously sent command */
a9de9248 4622void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4623{
4624 struct hci_command_hdr *hdr;
4625
4626 if (!hdev->sent_cmd)
4627 return NULL;
4628
4629 hdr = (void *) hdev->sent_cmd->data;
4630
a9de9248 4631 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4632 return NULL;
4633
f0e09510 4634 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4635
4636 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4637}
4638
4639/* Send ACL data */
4640static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4641{
4642 struct hci_acl_hdr *hdr;
4643 int len = skb->len;
4644
badff6d0
ACM
4645 skb_push(skb, HCI_ACL_HDR_SIZE);
4646 skb_reset_transport_header(skb);
9c70220b 4647 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4648 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4649 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4650}
4651
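/* For reference: hci_handle_pack(h, f) folds the 4-bit ACL flags
 * (packet boundary and broadcast) into the top nibble of the 16-bit
 * handle field, i.e. (h & 0x0fff) | (f << 12). A handle of 0x002a sent
 * with ACL_START (0x02) therefore goes on the wire as 0x202a;
 * hci_handle()/hci_flags() in the RX path below undo exactly this
 * packing.
 */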
ee22be7e 4652static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4653 struct sk_buff *skb, __u16 flags)
1da177e4 4654{
ee22be7e 4655 struct hci_conn *conn = chan->conn;
1da177e4
LT
4656 struct hci_dev *hdev = conn->hdev;
4657 struct sk_buff *list;
4658
087bfd99
GP
4659 skb->len = skb_headlen(skb);
4660 skb->data_len = 0;
4661
4662 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4663
4664 switch (hdev->dev_type) {
4665 case HCI_BREDR:
4666 hci_add_acl_hdr(skb, conn->handle, flags);
4667 break;
4668 case HCI_AMP:
4669 hci_add_acl_hdr(skb, chan->handle, flags);
4670 break;
4671 default:
4672 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4673 return;
4674 }
087bfd99 4675
70f23020
AE
4676 list = skb_shinfo(skb)->frag_list;
4677 if (!list) {
1da177e4
LT
4678 /* Non-fragmented */
4679 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4680
73d80deb 4681 skb_queue_tail(queue, skb);
1da177e4
LT
4682 } else {
4683 /* Fragmented */
4684 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4685
4686 skb_shinfo(skb)->frag_list = NULL;
4687
9cfd5a23
JR
4688 /* Queue all fragments atomically. We need to use spin_lock_bh
4689 * here because of 6LoWPAN links, as there this function is
4690 * called from softirq and using normal spin lock could cause
4691 * deadlocks.
4692 */
4693 spin_lock_bh(&queue->lock);
1da177e4 4694
73d80deb 4695 __skb_queue_tail(queue, skb);
e702112f
AE
4696
4697 flags &= ~ACL_START;
4698 flags |= ACL_CONT;
1da177e4
LT
4699 do {
4700 skb = list; list = list->next;
8e87d142 4701
0d48d939 4702 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4703 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4704
4705 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4706
73d80deb 4707 __skb_queue_tail(queue, skb);
1da177e4
LT
4708 } while (list);
4709
9cfd5a23 4710 spin_unlock_bh(&queue->lock);
1da177e4 4711 }
73d80deb
LAD
4712}
4713
4714void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4715{
ee22be7e 4716 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4717
f0e09510 4718 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4719
ee22be7e 4720 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4721
3eff45ea 4722 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4723}
1da177e4
LT
4724
4725/* Send SCO data */
0d861d8b 4726void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4727{
4728 struct hci_dev *hdev = conn->hdev;
4729 struct hci_sco_hdr hdr;
4730
4731 BT_DBG("%s len %d", hdev->name, skb->len);
4732
aca3192c 4733 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4734 hdr.dlen = skb->len;
4735
badff6d0
ACM
4736 skb_push(skb, HCI_SCO_HDR_SIZE);
4737 skb_reset_transport_header(skb);
9c70220b 4738 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4739
0d48d939 4740 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4741
1da177e4 4742 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4743 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4744}
1da177e4
LT
4745
4746/* ---- HCI TX task (outgoing data) ---- */
4747
4748/* HCI Connection scheduler */
6039aa73
GP
4749static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4750 int *quote)
1da177e4
LT
4751{
4752 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4753 struct hci_conn *conn = NULL, *c;
abc5de8f 4754 unsigned int num = 0, min = ~0;
1da177e4 4755
8e87d142 4756 /* We don't have to lock the device here. Connections are always
1da177e4 4757 * added and removed with the TX task disabled. */
bf4c6325
GP
4758
4759 rcu_read_lock();
4760
4761 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4762 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4763 continue;
769be974
MH
4764
4765 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4766 continue;
4767
1da177e4
LT
4768 num++;
4769
4770 if (c->sent < min) {
4771 min = c->sent;
4772 conn = c;
4773 }
52087a79
LAD
4774
4775 if (hci_conn_num(hdev, type) == num)
4776 break;
1da177e4
LT
4777 }
4778
bf4c6325
GP
4779 rcu_read_unlock();
4780
1da177e4 4781 if (conn) {
6ed58ec5
VT
4782 int cnt, q;
4783
4784 switch (conn->type) {
4785 case ACL_LINK:
4786 cnt = hdev->acl_cnt;
4787 break;
4788 case SCO_LINK:
4789 case ESCO_LINK:
4790 cnt = hdev->sco_cnt;
4791 break;
4792 case LE_LINK:
4793 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4794 break;
4795 default:
4796 cnt = 0;
4797 BT_ERR("Unknown link type");
4798 }
4799
4800 q = cnt / num;
1da177e4
LT
4801 *quote = q ? q : 1;
4802 } else
4803 *quote = 0;
4804
4805 BT_DBG("conn %p quote %d", conn, *quote);
4806 return conn;
4807}
4808
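/* Worked example of the quota computed above: with hdev->acl_cnt == 8
 * free controller buffers shared by num == 3 busy ACL connections, the
 * least used connection gets q = 8 / 3 = 2 packets this round; *quote
 * is clamped to at least 1 so a connection is never starved outright.
 */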
6039aa73 4809static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4810{
4811 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4812 struct hci_conn *c;
1da177e4 4813
bae1f5d9 4814 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4815
bf4c6325
GP
4816 rcu_read_lock();
4817
1da177e4 4818 /* Kill stalled connections */
bf4c6325 4819 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4820 if (c->type == type && c->sent) {
6ed93dc6
AE
4821 BT_ERR("%s killing stalled connection %pMR",
4822 hdev->name, &c->dst);
bed71748 4823 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4824 }
4825 }
bf4c6325
GP
4826
4827 rcu_read_unlock();
1da177e4
LT
4828}
4829
6039aa73
GP
4830static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4831 int *quote)
1da177e4 4832{
73d80deb
LAD
4833 struct hci_conn_hash *h = &hdev->conn_hash;
4834 struct hci_chan *chan = NULL;
abc5de8f 4835 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4836 struct hci_conn *conn;
73d80deb
LAD
4837 int cnt, q, conn_num = 0;
4838
4839 BT_DBG("%s", hdev->name);
4840
bf4c6325
GP
4841 rcu_read_lock();
4842
4843 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4844 struct hci_chan *tmp;
4845
4846 if (conn->type != type)
4847 continue;
4848
4849 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4850 continue;
4851
4852 conn_num++;
4853
8192edef 4854 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4855 struct sk_buff *skb;
4856
4857 if (skb_queue_empty(&tmp->data_q))
4858 continue;
4859
4860 skb = skb_peek(&tmp->data_q);
4861 if (skb->priority < cur_prio)
4862 continue;
4863
4864 if (skb->priority > cur_prio) {
4865 num = 0;
4866 min = ~0;
4867 cur_prio = skb->priority;
4868 }
4869
4870 num++;
4871
4872 if (conn->sent < min) {
4873 min = conn->sent;
4874 chan = tmp;
4875 }
4876 }
4877
4878 if (hci_conn_num(hdev, type) == conn_num)
4879 break;
4880 }
4881
bf4c6325
GP
4882 rcu_read_unlock();
4883
73d80deb
LAD
4884 if (!chan)
4885 return NULL;
4886
4887 switch (chan->conn->type) {
4888 case ACL_LINK:
4889 cnt = hdev->acl_cnt;
4890 break;
bd1eb66b
AE
4891 case AMP_LINK:
4892 cnt = hdev->block_cnt;
4893 break;
73d80deb
LAD
4894 case SCO_LINK:
4895 case ESCO_LINK:
4896 cnt = hdev->sco_cnt;
4897 break;
4898 case LE_LINK:
4899 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4900 break;
4901 default:
4902 cnt = 0;
4903 BT_ERR("Unknown link type");
4904 }
4905
4906 q = cnt / num;
4907 *quote = q ? q : 1;
4908 BT_DBG("chan %p quote %d", chan, *quote);
4909 return chan;
4910}
4911
02b20f0b
LAD
4912static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4913{
4914 struct hci_conn_hash *h = &hdev->conn_hash;
4915 struct hci_conn *conn;
4916 int num = 0;
4917
4918 BT_DBG("%s", hdev->name);
4919
bf4c6325
GP
4920 rcu_read_lock();
4921
4922 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4923 struct hci_chan *chan;
4924
4925 if (conn->type != type)
4926 continue;
4927
4928 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4929 continue;
4930
4931 num++;
4932
8192edef 4933 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4934 struct sk_buff *skb;
4935
4936 if (chan->sent) {
4937 chan->sent = 0;
4938 continue;
4939 }
4940
4941 if (skb_queue_empty(&chan->data_q))
4942 continue;
4943
4944 skb = skb_peek(&chan->data_q);
4945 if (skb->priority >= HCI_PRIO_MAX - 1)
4946 continue;
4947
4948 skb->priority = HCI_PRIO_MAX - 1;
4949
4950 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4951 skb->priority);
02b20f0b
LAD
4952 }
4953
4954 if (hci_conn_num(hdev, type) == num)
4955 break;
4956 }
bf4c6325
GP
4957
4958 rcu_read_unlock();
4959
02b20f0b
LAD
4960}
4961
b71d385a
AE
4962static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4963{
4964 /* Calculate count of blocks used by this packet */
4965 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4966}
4967
6039aa73 4968static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4969{
4a964404 4970 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4971 /* ACL tx timeout must be longer than maximum
4972 * link supervision timeout (40.9 seconds) */
63d2bc1b 4973 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4974 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4975 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4976 }
63d2bc1b 4977}
1da177e4 4978
6039aa73 4979static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4980{
4981 unsigned int cnt = hdev->acl_cnt;
4982 struct hci_chan *chan;
4983 struct sk_buff *skb;
4984 int quote;
4985
4986 __check_timeout(hdev, cnt);
04837f64 4987
73d80deb 4988 while (hdev->acl_cnt &&
a8c5fb1a 4989 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4990 u32 priority = (skb_peek(&chan->data_q))->priority;
4991 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4992 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4993 skb->len, skb->priority);
73d80deb 4994
ec1cce24
LAD
4995 /* Stop if priority has changed */
4996 if (skb->priority < priority)
4997 break;
4998
4999 skb = skb_dequeue(&chan->data_q);
5000
73d80deb 5001 hci_conn_enter_active_mode(chan->conn,
04124681 5002 bt_cb(skb)->force_active);
04837f64 5003
57d17d70 5004 hci_send_frame(hdev, skb);
1da177e4
LT
5005 hdev->acl_last_tx = jiffies;
5006
5007 hdev->acl_cnt--;
73d80deb
LAD
5008 chan->sent++;
5009 chan->conn->sent++;
1da177e4
LT
5010 }
5011 }
02b20f0b
LAD
5012
5013 if (cnt != hdev->acl_cnt)
5014 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5015}
5016
6039aa73 5017static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5018{
63d2bc1b 5019 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5020 struct hci_chan *chan;
5021 struct sk_buff *skb;
5022 int quote;
bd1eb66b 5023 u8 type;
b71d385a 5024
63d2bc1b 5025 __check_timeout(hdev, cnt);
b71d385a 5026
bd1eb66b
AE
5027 BT_DBG("%s", hdev->name);
5028
5029 if (hdev->dev_type == HCI_AMP)
5030 type = AMP_LINK;
5031 else
5032 type = ACL_LINK;
5033
b71d385a 5034 while (hdev->block_cnt > 0 &&
bd1eb66b 5035 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5036 u32 priority = (skb_peek(&chan->data_q))->priority;
5037 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5038 int blocks;
5039
5040 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5041 skb->len, skb->priority);
b71d385a
AE
5042
5043 /* Stop if priority has changed */
5044 if (skb->priority < priority)
5045 break;
5046
5047 skb = skb_dequeue(&chan->data_q);
5048
5049 blocks = __get_blocks(hdev, skb);
5050 if (blocks > hdev->block_cnt)
5051 return;
5052
5053 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5054 bt_cb(skb)->force_active);
b71d385a 5055
57d17d70 5056 hci_send_frame(hdev, skb);
b71d385a
AE
5057 hdev->acl_last_tx = jiffies;
5058
5059 hdev->block_cnt -= blocks;
5060 quote -= blocks;
5061
5062 chan->sent += blocks;
5063 chan->conn->sent += blocks;
5064 }
5065 }
5066
5067 if (cnt != hdev->block_cnt)
bd1eb66b 5068 hci_prio_recalculate(hdev, type);
b71d385a
AE
5069}
5070
6039aa73 5071static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5072{
5073 BT_DBG("%s", hdev->name);
5074
bd1eb66b
AE
5075 /* No ACL link over BR/EDR controller */
5076 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5077 return;
5078
5079 /* No AMP link over AMP controller */
5080 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5081 return;
5082
5083 switch (hdev->flow_ctl_mode) {
5084 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5085 hci_sched_acl_pkt(hdev);
5086 break;
5087
5088 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5089 hci_sched_acl_blk(hdev);
5090 break;
5091 }
5092}
5093
1da177e4 5094/* Schedule SCO */
6039aa73 5095static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5096{
5097 struct hci_conn *conn;
5098 struct sk_buff *skb;
5099 int quote;
5100
5101 BT_DBG("%s", hdev->name);
5102
52087a79
LAD
5103 if (!hci_conn_num(hdev, SCO_LINK))
5104 return;
5105
1da177e4
LT
5106 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5107 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5108 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5109 hci_send_frame(hdev, skb);
1da177e4
LT
5110
5111 conn->sent++;
5112 if (conn->sent == ~0)
5113 conn->sent = 0;
5114 }
5115 }
5116}
5117
6039aa73 5118static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5119{
5120 struct hci_conn *conn;
5121 struct sk_buff *skb;
5122 int quote;
5123
5124 BT_DBG("%s", hdev->name);
5125
52087a79
LAD
5126 if (!hci_conn_num(hdev, ESCO_LINK))
5127 return;
5128
8fc9ced3
GP
5129 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5130 &quote))) {
b6a0dc82
MH
5131 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5132 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5133 hci_send_frame(hdev, skb);
b6a0dc82
MH
5134
5135 conn->sent++;
5136 if (conn->sent == ~0)
5137 conn->sent = 0;
5138 }
5139 }
5140}
5141
6039aa73 5142static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5143{
73d80deb 5144 struct hci_chan *chan;
6ed58ec5 5145 struct sk_buff *skb;
02b20f0b 5146 int quote, cnt, tmp;
6ed58ec5
VT
5147
5148 BT_DBG("%s", hdev->name);
5149
52087a79
LAD
5150 if (!hci_conn_num(hdev, LE_LINK))
5151 return;
5152
4a964404 5153 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5154 /* LE tx timeout must be longer than maximum
5155 * link supervision timeout (40.9 seconds) */
bae1f5d9 5156 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5157 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5158 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5159 }
5160
5161 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5162 tmp = cnt;
73d80deb 5163 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5164 u32 priority = (skb_peek(&chan->data_q))->priority;
5165 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5166 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5167 skb->len, skb->priority);
6ed58ec5 5168
ec1cce24
LAD
5169 /* Stop if priority has changed */
5170 if (skb->priority < priority)
5171 break;
5172
5173 skb = skb_dequeue(&chan->data_q);
5174
57d17d70 5175 hci_send_frame(hdev, skb);
6ed58ec5
VT
5176 hdev->le_last_tx = jiffies;
5177
5178 cnt--;
73d80deb
LAD
5179 chan->sent++;
5180 chan->conn->sent++;
6ed58ec5
VT
5181 }
5182 }
73d80deb 5183
6ed58ec5
VT
5184 if (hdev->le_pkts)
5185 hdev->le_cnt = cnt;
5186 else
5187 hdev->acl_cnt = cnt;
02b20f0b
LAD
5188
5189 if (cnt != tmp)
5190 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5191}
5192
3eff45ea 5193static void hci_tx_work(struct work_struct *work)
1da177e4 5194{
3eff45ea 5195 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5196 struct sk_buff *skb;
5197
6ed58ec5 5198 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5199 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5200
52de599e
MH
5201 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5202 /* Schedule queues and send stuff to HCI driver */
5203 hci_sched_acl(hdev);
5204 hci_sched_sco(hdev);
5205 hci_sched_esco(hdev);
5206 hci_sched_le(hdev);
5207 }
6ed58ec5 5208
1da177e4
LT
5209 /* Send next queued raw (unknown type) packet */
5210 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5211 hci_send_frame(hdev, skb);
1da177e4
LT
5212}
5213
25985edc 5214/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5215
5216/* ACL data packet */
6039aa73 5217static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5218{
5219 struct hci_acl_hdr *hdr = (void *) skb->data;
5220 struct hci_conn *conn;
5221 __u16 handle, flags;
5222
5223 skb_pull(skb, HCI_ACL_HDR_SIZE);
5224
5225 handle = __le16_to_cpu(hdr->handle);
5226 flags = hci_flags(handle);
5227 handle = hci_handle(handle);
5228
f0e09510 5229 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5230 handle, flags);
1da177e4
LT
5231
5232 hdev->stat.acl_rx++;
5233
5234 hci_dev_lock(hdev);
5235 conn = hci_conn_hash_lookup_handle(hdev, handle);
5236 hci_dev_unlock(hdev);
8e87d142 5237
1da177e4 5238 if (conn) {
65983fc7 5239 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5240
1da177e4 5241 /* Send to upper protocol */
686ebf28
UF
5242 l2cap_recv_acldata(conn, skb, flags);
5243 return;
1da177e4 5244 } else {
8e87d142 5245 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5246 hdev->name, handle);
1da177e4
LT
5247 }
5248
5249 kfree_skb(skb);
5250}
5251
5252/* SCO data packet */
6039aa73 5253static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5254{
5255 struct hci_sco_hdr *hdr = (void *) skb->data;
5256 struct hci_conn *conn;
5257 __u16 handle;
5258
5259 skb_pull(skb, HCI_SCO_HDR_SIZE);
5260
5261 handle = __le16_to_cpu(hdr->handle);
5262
f0e09510 5263 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5264
5265 hdev->stat.sco_rx++;
5266
5267 hci_dev_lock(hdev);
5268 conn = hci_conn_hash_lookup_handle(hdev, handle);
5269 hci_dev_unlock(hdev);
5270
5271 if (conn) {
1da177e4 5272 /* Send to upper protocol */
686ebf28
UF
5273 sco_recv_scodata(conn, skb);
5274 return;
1da177e4 5275 } else {
8e87d142 5276 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5277 hdev->name, handle);
1da177e4
LT
5278 }
5279
5280 kfree_skb(skb);
5281}
5282
9238f36a
JH
5283static bool hci_req_is_complete(struct hci_dev *hdev)
5284{
5285 struct sk_buff *skb;
5286
5287 skb = skb_peek(&hdev->cmd_q);
5288 if (!skb)
5289 return true;
5290
5291 return bt_cb(skb)->req.start;
5292}
5293
42c6b129
JH
5294static void hci_resend_last(struct hci_dev *hdev)
5295{
5296 struct hci_command_hdr *sent;
5297 struct sk_buff *skb;
5298 u16 opcode;
5299
5300 if (!hdev->sent_cmd)
5301 return;
5302
5303 sent = (void *) hdev->sent_cmd->data;
5304 opcode = __le16_to_cpu(sent->opcode);
5305 if (opcode == HCI_OP_RESET)
5306 return;
5307
5308 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5309 if (!skb)
5310 return;
5311
5312 skb_queue_head(&hdev->cmd_q, skb);
5313 queue_work(hdev->workqueue, &hdev->cmd_work);
5314}
5315
9238f36a
JH
5316void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5317{
5318 hci_req_complete_t req_complete = NULL;
5319 struct sk_buff *skb;
5320 unsigned long flags;
5321
5322 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5323
42c6b129
JH
5324 /* If the completed command doesn't match the last one that was
5325 * sent we need to do special handling of it.
9238f36a 5326 */
42c6b129
JH
5327 if (!hci_sent_cmd_data(hdev, opcode)) {
5328 /* Some CSR based controllers generate a spontaneous
5329 * reset complete event during init and any pending
5330 * command will never be completed. In such a case we
5331 * need to resend whatever was the last sent
5332 * command.
5333 */
5334 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5335 hci_resend_last(hdev);
5336
9238f36a 5337 return;
42c6b129 5338 }
9238f36a
JH
5339
5340 /* If the command succeeded and there are still more commands in
5341 * this request, the request is not yet complete.
5342 */
5343 if (!status && !hci_req_is_complete(hdev))
5344 return;
5345
5346 /* If this was the last command in a request the complete
5347 * callback would be found in hdev->sent_cmd instead of the
5348 * command queue (hdev->cmd_q).
5349 */
5350 if (hdev->sent_cmd) {
5351 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5352
5353 if (req_complete) {
5354 /* We must set the complete callback to NULL to
5355 * avoid calling the callback more than once if
5356 * this function gets called again.
5357 */
5358 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5359
9238f36a 5360 goto call_complete;
53e21fbc 5361 }
9238f36a
JH
5362 }
5363
5364 /* Remove all pending commands belonging to this request */
5365 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5366 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5367 if (bt_cb(skb)->req.start) {
5368 __skb_queue_head(&hdev->cmd_q, skb);
5369 break;
5370 }
5371
5372 req_complete = bt_cb(skb)->req.complete;
5373 kfree_skb(skb);
5374 }
5375 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5376
5377call_complete:
5378 if (req_complete)
5379 req_complete(hdev, status);
5380}
5381
b78752cc 5382static void hci_rx_work(struct work_struct *work)
1da177e4 5383{
b78752cc 5384 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5385 struct sk_buff *skb;
5386
5387 BT_DBG("%s", hdev->name);
5388
1da177e4 5389 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5390 /* Send copy to monitor */
5391 hci_send_to_monitor(hdev, skb);
5392
1da177e4
LT
5393 if (atomic_read(&hdev->promisc)) {
5394 /* Send copy to the sockets */
470fe1b5 5395 hci_send_to_sock(hdev, skb);
1da177e4
LT
5396 }
5397
fee746b0 5398 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5399 kfree_skb(skb);
5400 continue;
5401 }
5402
5403 if (test_bit(HCI_INIT, &hdev->flags)) {
5404 /* Don't process data packets in these states. */
0d48d939 5405 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5406 case HCI_ACLDATA_PKT:
5407 case HCI_SCODATA_PKT:
5408 kfree_skb(skb);
5409 continue;
3ff50b79 5410 }
1da177e4
LT
5411 }
5412
5413 /* Process frame */
0d48d939 5414 switch (bt_cb(skb)->pkt_type) {
1da177e4 5415 case HCI_EVENT_PKT:
b78752cc 5416 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5417 hci_event_packet(hdev, skb);
5418 break;
5419
5420 case HCI_ACLDATA_PKT:
5421 BT_DBG("%s ACL data packet", hdev->name);
5422 hci_acldata_packet(hdev, skb);
5423 break;
5424
5425 case HCI_SCODATA_PKT:
5426 BT_DBG("%s SCO data packet", hdev->name);
5427 hci_scodata_packet(hdev, skb);
5428 break;
5429
5430 default:
5431 kfree_skb(skb);
5432 break;
5433 }
5434 }
1da177e4
LT
5435}
5436
c347b765 5437static void hci_cmd_work(struct work_struct *work)
1da177e4 5438{
c347b765 5439 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5440 struct sk_buff *skb;
5441
2104786b
AE
5442 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5443 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5444
1da177e4 5445 /* Send queued commands */
5a08ecce
AE
5446 if (atomic_read(&hdev->cmd_cnt)) {
5447 skb = skb_dequeue(&hdev->cmd_q);
5448 if (!skb)
5449 return;
5450
7585b97a 5451 kfree_skb(hdev->sent_cmd);
1da177e4 5452
a675d7f1 5453 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5454 if (hdev->sent_cmd) {
1da177e4 5455 atomic_dec(&hdev->cmd_cnt);
57d17d70 5456 hci_send_frame(hdev, skb);
7bdb8a5c 5457 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5458 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5459 else
65cc2b49
MH
5460 schedule_delayed_work(&hdev->cmd_timer,
5461 HCI_CMD_TIMEOUT);
1da177e4
LT
5462 } else {
5463 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5464 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5465 }
5466 }
5467}
b1efcc28
AG
5468
5469void hci_req_add_le_scan_disable(struct hci_request *req)
5470{
5471 struct hci_cp_le_set_scan_enable cp;
5472
5473 memset(&cp, 0, sizeof(cp));
5474 cp.enable = LE_SCAN_DISABLE;
5475 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5476}
a4790dbd 5477
8540f6c0
MH
5478static void add_to_white_list(struct hci_request *req,
5479 struct hci_conn_params *params)
5480{
5481 struct hci_cp_le_add_to_white_list cp;
5482
5483 cp.bdaddr_type = params->addr_type;
5484 bacpy(&cp.bdaddr, &params->addr);
5485
5486 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5487}
5488
5489static u8 update_white_list(struct hci_request *req)
5490{
5491 struct hci_dev *hdev = req->hdev;
5492 struct hci_conn_params *params;
5493 struct bdaddr_list *b;
5494 uint8_t white_list_entries = 0;
5495
5496 /* Go through the current white list programmed into the
5497 * controller one by one and check if that address is still
5498 * in the list of pending connections or list of devices to
5499 * report. If not present in either list, then queue the
5500 * command to remove it from the controller.
5501 */
5502 list_for_each_entry(b, &hdev->le_white_list, list) {
5503 struct hci_cp_le_del_from_white_list cp;
5504
5505 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5506 &b->bdaddr, b->bdaddr_type) ||
5507 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5508 &b->bdaddr, b->bdaddr_type)) {
5509 white_list_entries++;
5510 continue;
5511 }
5512
5513 cp.bdaddr_type = b->bdaddr_type;
5514 bacpy(&cp.bdaddr, &b->bdaddr);
5515
5516 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5517 sizeof(cp), &cp);
5518 }
5519
5520 /* Now that all stale white list entries have been
5521 * removed, walk through the list of pending connections
5522 * and ensure that any new device gets programmed into
5523 * the controller.
5524 *
5525 * If the list of the devices is larger than the list of
5526 * available white list entries in the controller, then
5527 * just abort and return filer policy value to not use the
5528 * white list.
5529 */
5530 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5531 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5532 &params->addr, params->addr_type))
5533 continue;
5534
5535 if (white_list_entries >= hdev->le_white_list_size) {
5536 /* Select filter policy to accept all advertising */
5537 return 0x00;
5538 }
5539
66d8e837
MH
5540 if (hci_find_irk_by_addr(hdev, &params->addr,
5541 params->addr_type)) {
5542 /* White list can not be used with RPAs */
5543 return 0x00;
5544 }
5545
8540f6c0
MH
5546 white_list_entries++;
5547 add_to_white_list(req, params);
5548 }
5549
5550 /* After adding all new pending connections, walk through
5551 * the list of pending reports and also add these to the
5552 * white list if there is still space.
5553 */
5554 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5555 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5556 &params->addr, params->addr_type))
5557 continue;
5558
5559 if (white_list_entries >= hdev->le_white_list_size) {
5560 /* Select filter policy to accept all advertising */
5561 return 0x00;
5562 }
5563
66d8e837
MH
5564 if (hci_find_irk_by_addr(hdev, &params->addr,
5565 params->addr_type)) {
5566 /* White list can not be used with RPAs */
5567 return 0x00;
5568 }
5569
8540f6c0
MH
5570 white_list_entries++;
5571 add_to_white_list(req, params);
5572 }
5573
5574 /* Select filter policy to use white list */
5575 return 0x01;
5576}
5577
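/* The value returned above becomes param_cp.filter_policy in
 * hci_req_add_le_passive_scan() below: 0x00 asks the controller to
 * accept all advertising packets, 0x01 to accept only packets from
 * devices on its white list.
 */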
8ef30fd3
AG
5578void hci_req_add_le_passive_scan(struct hci_request *req)
5579{
5580 struct hci_cp_le_set_scan_param param_cp;
5581 struct hci_cp_le_set_scan_enable enable_cp;
5582 struct hci_dev *hdev = req->hdev;
5583 u8 own_addr_type;
8540f6c0 5584 u8 filter_policy;
8ef30fd3 5585
6ab535a7
MH
5586 /* Set require_privacy to false since no SCAN_REQs are sent
5587 * during passive scanning. Not using an unresolvable address
5588 * here is important so that peer devices using direct
5589 * advertising with our address will be correctly reported
5590 * by the controller.
8ef30fd3 5591 */
6ab535a7 5592 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5593 return;
5594
8540f6c0
MH
5595 /* Adding or removing entries from the white list must
5596 * happen before enabling scanning. The controller does
5597 * not allow white list modification while scanning.
5598 */
5599 filter_policy = update_white_list(req);
5600
8ef30fd3
AG
5601 memset(&param_cp, 0, sizeof(param_cp));
5602 param_cp.type = LE_SCAN_PASSIVE;
5603 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5604 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5605 param_cp.own_address_type = own_addr_type;
8540f6c0 5606 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5607 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5608 &param_cp);
5609
5610 memset(&enable_cp, 0, sizeof(enable_cp));
5611 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5612 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5613 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5614 &enable_cp);
5615}
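
/* Usage sketch (illustrative only, not part of the original file):
 * a minimal caller that schedules a passive scan with the helper
 * above. The flow mirrors what hci_update_background_scan() does
 * below, with error handling reduced to the essentials. The function
 * name is hypothetical.
 */
static void example_start_passive_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	int err;

	/* Collect the scan parameter and scan enable commands
	 * into a single request ...
	 */
	hci_req_init(&req, hdev);
	hci_req_add_le_passive_scan(&req);

	/* ... and submit them; a NULL complete callback is valid */
	err = hci_req_run(&req, NULL);
	if (err)
		BT_ERR("Failed to run passive scan request: err %d", err);
}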
5616
a4790dbd
AG
5617static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5618{
5619 if (status)
5620 BT_DBG("HCI request failed to update background scanning: "
5621 "status 0x%2.2x", status);
5622}
5623
5624/* This function controls the background scanning based on the
5625 * hdev->pend_le_conns list. If there are pending LE connections, we
5626 * start the background scanning; otherwise we stop it.
5627 *
5628 * This function requires that the caller holds hdev->lock.
5629 */
5630void hci_update_background_scan(struct hci_dev *hdev)
5631{
a4790dbd
AG
5632 struct hci_request req;
5633 struct hci_conn *conn;
5634 int err;
5635
c20c02d5
MH
5636 if (!test_bit(HCI_UP, &hdev->flags) ||
5637 test_bit(HCI_INIT, &hdev->flags) ||
5638 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5639 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5640 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5641 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5642 return;
5643
a70f4b5f
JH
5644 /* No point in doing scanning if LE support hasn't been enabled */
5645 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5646 return;
5647
ae23ada4
JH
5648 /* If discovery is active, don't interfere with it */
5649 if (hdev->discovery.state != DISCOVERY_STOPPED)
5650 return;
5651
a4790dbd
AG
5652 hci_req_init(&req, hdev);
5653
d1d588c1 5654 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5655 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5656 /* If there are no pending LE connections or devices
5657 * to be scanned for, we should stop the background
5658 * scanning.
a4790dbd
AG
5659 */
5660
5661 /* If controller is not scanning we are done. */
5662 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5663 return;
5664
5665 hci_req_add_le_scan_disable(&req);
5666
5667 BT_DBG("%s stopping background scanning", hdev->name);
5668 } else {
a4790dbd
AG
5669 /* If there is at least one pending LE connection, we should
5670 * keep the background scan running.
5671 */
5672
a4790dbd
AG
5673 /* If controller is connecting, we should not start scanning
5674 * since some controllers are not able to scan and connect at
5675 * the same time.
5676 */
5677 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5678 if (conn)
5679 return;
5680
4340a124
AG
5681 /* If controller is currently scanning, we stop it to ensure we
5682 * don't miss any advertising (due to the duplicates filter).
5683 */
5684 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5685 hci_req_add_le_scan_disable(&req);
5686
8ef30fd3 5687 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5688
5689 BT_DBG("%s starting background scanning", hdev->name);
5690 }
5691
5692 err = hci_req_run(&req, update_background_scan_complete);
5693 if (err)
5694 BT_ERR("Failed to run HCI request: err %d", err);
5695}
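
/* Usage sketch (illustrative only, not part of the original file):
 * since hci_update_background_scan() requires hdev->lock, a caller
 * typically wraps the pending-connection list modification and the
 * scan update in one locked section. The function name is
 * hypothetical; the locking pattern is the point.
 */
static void example_trigger_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	/* ... add or remove entries on hdev->pend_le_conns here ... */

	hci_update_background_scan(hdev);

	hci_dev_unlock(hdev);
}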
432df05e 5696
22f433dc
JH
5697static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5698{
5699 struct bdaddr_list *b;
5700
5701 list_for_each_entry(b, &hdev->whitelist, list) {
5702 struct hci_conn *conn;
5703
5704 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5705 if (!conn)
5706 return true;
5707
5708 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5709 return true;
5710 }
5711
5712 return false;
5713}
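
/* Note (added commentary): this predicate keeps page scanning alive
 * for reconnections. It returns true as soon as any device on the
 * BR/EDR whitelist has no ACL link in BT_CONNECTED or BT_CONFIG
 * state, which hci_update_page_scan() below treats the same way as
 * the HCI_CONNECTABLE flag being set.
 */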
5714
432df05e
JH
5715void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5716{
5717 u8 scan;
5718
5719 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5720 return;
5721
5722 if (!hdev_is_powered(hdev))
5723 return;
5724
5725 if (mgmt_powering_down(hdev))
5726 return;
5727
5728 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5729 disconnected_whitelist_entries(hdev))
432df05e
JH
5730 scan = SCAN_PAGE;
5731 else
5732 scan = SCAN_DISABLED;
5733
5734 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5735 return;
5736
5737 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5738 scan |= SCAN_INQUIRY;
5739
5740 if (req)
5741 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5742 else
5743 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5744}
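
/* Note (added commentary): the single byte written with
 * HCI_OP_WRITE_SCAN_ENABLE is a bit mask, so the values the logic
 * above may write are:
 *
 *	SCAN_DISABLED			neither page nor inquiry scan
 *	SCAN_INQUIRY			discoverable but not connectable
 *	SCAN_PAGE			connectable only
 *	SCAN_PAGE | SCAN_INQUIRY	connectable and discoverable
 */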