/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
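
/* Usage sketch (illustrative; assumes debugfs mounted at /sys/kernel/debug
 * and a controller hci0 that is up):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	echo N > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing the current value returns -EALREADY. Leaving DUT mode is done
 * with a plain HCI_OP_RESET since there is no explicit disable command.
 */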

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
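
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a debugfs
 * attribute from a get/set callback pair and a printf-style format; the
 * same pattern backs most of the integer tunables in this file.
 */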

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
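
/* Units for the LE parameter bounds above (per the Bluetooth Core spec):
 * connection intervals (0x0006-0x0c80) are in 1.25 ms steps, the
 * supervision timeout (0x000a-0x0c80) is in 10 ms steps, the slave
 * latency (up to 0x01f3) counts connection events, and the advertising
 * channel map is a bitmask of channels 37, 38 and 39.
 */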

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
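
/* The file_operations defined above are wired up under
 * /sys/kernel/debug/bluetooth/hciX (assuming debugfs is mounted at the
 * usual location) by __hci_init() below, during the HCI_SETUP phase.
 */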

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
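
/* __hci_cmd_sync_ev() sends a single HCI command and sleeps until either
 * the matching event arrives or the timeout expires. On success the caller
 * receives the event skb (with the event header and, for command complete,
 * the cmd_complete header already pulled) and owns it.
 */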

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
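
/* Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *) skb->data;
 *	... use rp->bdaddr ...
 *	kfree_skb(skb);
 */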

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
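
/* hci_req_sync() is the variant for use while the device is up: it fails
 * fast with -ENETDOWN otherwise and takes the request lock around
 * __hci_req_sync(), which itself expects that lock to be held already.
 */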

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
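
/* Note: hci_dev_get() takes a reference via hci_dev_hold(); callers must
 * drop it again with hci_dev_put() when they are done with the device.
 */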

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1972
3175405b 1973bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 1974 bool name_known, bool *ssp)
1da177e4 1975{
30883512 1976 struct discovery_state *cache = &hdev->discovery;
70f23020 1977 struct inquiry_entry *ie;
1da177e4 1978
6ed93dc6 1979 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1980
2b2fec4d
SJ
1981 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1982
01735bbd 1983 *ssp = data->ssp_mode;
388fc8fa 1984
70f23020 1985 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1986 if (ie) {
8002d77c 1987 if (ie->data.ssp_mode)
388fc8fa
JH
1988 *ssp = true;
1989
a3d4e20a 1990 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1991 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1992 ie->data.rssi = data->rssi;
1993 hci_inquiry_cache_update_resolve(hdev, ie);
1994 }
1995
561aafbc 1996 goto update;
a3d4e20a 1997 }
561aafbc
JH
1998
1999 /* Entry not in the cache. Add new one. */
2000 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2001 if (!ie)
3175405b 2002 return false;
561aafbc
JH
2003
2004 list_add(&ie->all, &cache->all);
2005
2006 if (name_known) {
2007 ie->name_state = NAME_KNOWN;
2008 } else {
2009 ie->name_state = NAME_NOT_KNOWN;
2010 list_add(&ie->list, &cache->unknown);
2011 }
70f23020 2012
561aafbc
JH
2013update:
2014 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2015 ie->name_state != NAME_PENDING) {
561aafbc
JH
2016 ie->name_state = NAME_KNOWN;
2017 list_del(&ie->list);
1da177e4
LT
2018 }
2019
70f23020
AE
2020 memcpy(&ie->data, data, sizeof(*data));
2021 ie->timestamp = jiffies;
1da177e4 2022 cache->timestamp = jiffies;
3175405b
JH
2023
2024 if (ie->name_state == NAME_NOT_KNOWN)
2025 return false;
2026
2027 return true;
1da177e4
LT
2028}
2029
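/* Editorial note (not in the original source): the return value tells the
 * caller whether this inquiry result can be reported as-is. It is true only
 * when the entry's name is already known; a false return signals that remote
 * name resolution is still outstanding for this device.
 */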
2030static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2031{
30883512 2032 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2033 struct inquiry_info *info = (struct inquiry_info *) buf;
2034 struct inquiry_entry *e;
2035 int copied = 0;
2036
561aafbc 2037 list_for_each_entry(e, &cache->all, all) {
1da177e4 2038 struct inquiry_data *data = &e->data;
b57c1a56
JH
2039
2040 if (copied >= num)
2041 break;
2042
1da177e4
LT
2043 bacpy(&info->bdaddr, &data->bdaddr);
2044 info->pscan_rep_mode = data->pscan_rep_mode;
2045 info->pscan_period_mode = data->pscan_period_mode;
2046 info->pscan_mode = data->pscan_mode;
2047 memcpy(info->dev_class, data->dev_class, 3);
2048 info->clock_offset = data->clock_offset;
b57c1a56 2049
1da177e4 2050 info++;
b57c1a56 2051 copied++;
1da177e4
LT
2052 }
2053
2054 BT_DBG("cache %p, copied %d", cache, copied);
2055 return copied;
2056}
2057
42c6b129 2058static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2059{
2060 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2061 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2062 struct hci_cp_inquiry cp;
2063
2064 BT_DBG("%s", hdev->name);
2065
2066 if (test_bit(HCI_INQUIRY, &hdev->flags))
2067 return;
2068
2069 /* Start Inquiry */
2070 memcpy(&cp.lap, &ir->lap, 3);
2071 cp.length = ir->length;
2072 cp.num_rsp = ir->num_rsp;
42c6b129 2073 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2074}
2075
3e13fa1e
AG
2076static int wait_inquiry(void *word)
2077{
2078 schedule();
2079 return signal_pending(current);
2080}
2081
1da177e4
LT
2082int hci_inquiry(void __user *arg)
2083{
2084 __u8 __user *ptr = arg;
2085 struct hci_inquiry_req ir;
2086 struct hci_dev *hdev;
2087 int err = 0, do_inquiry = 0, max_rsp;
2088 long timeo;
2089 __u8 *buf;
2090
2091 if (copy_from_user(&ir, ptr, sizeof(ir)))
2092 return -EFAULT;
2093
5a08ecce
AE
2094 hdev = hci_dev_get(ir.dev_id);
2095 if (!hdev)
1da177e4
LT
2096 return -ENODEV;
2097
0736cfa8
MH
2098 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2099 err = -EBUSY;
2100 goto done;
2101 }
2102
fee746b0
MH
2103 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2104 err = -EOPNOTSUPP;
2105 goto done;
2106 }
2107
5b69bef5
MH
2108 if (hdev->dev_type != HCI_BREDR) {
2109 err = -EOPNOTSUPP;
2110 goto done;
2111 }
2112
56f87901
JH
2113 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2114 err = -EOPNOTSUPP;
2115 goto done;
2116 }
2117
09fd0de5 2118 hci_dev_lock(hdev);
8e87d142 2119 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2120 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2121 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2122 do_inquiry = 1;
2123 }
09fd0de5 2124 hci_dev_unlock(hdev);
1da177e4 2125
04837f64 2126 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2127
2128 if (do_inquiry) {
01178cd4
JH
2129 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2130 timeo);
70f23020
AE
2131 if (err < 0)
2132 goto done;
3e13fa1e
AG
2133
2134 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2135 * cleared). If it is interrupted by a signal, return -EINTR.
2136 */
2137 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2138 TASK_INTERRUPTIBLE))
2139 return -EINTR;
70f23020 2140 }
1da177e4 2141
8fc9ced3
GP
 2142 /* For an unlimited number of responses we use a buffer with
 2143 * 255 entries
2144 */
1da177e4
LT
2145 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2146
 2147 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2148 * and then copy it to user space.
2149 */
01df8c31 2150 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2151 if (!buf) {
1da177e4
LT
2152 err = -ENOMEM;
2153 goto done;
2154 }
2155
09fd0de5 2156 hci_dev_lock(hdev);
1da177e4 2157 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2158 hci_dev_unlock(hdev);
1da177e4
LT
2159
2160 BT_DBG("num_rsp %d", ir.num_rsp);
2161
2162 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2163 ptr += sizeof(ir);
2164 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2165 ir.num_rsp))
1da177e4 2166 err = -EFAULT;
8e87d142 2167 } else
1da177e4
LT
2168 err = -EFAULT;
2169
2170 kfree(buf);
2171
2172done:
2173 hci_dev_put(hdev);
2174 return err;
2175}
2176
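/* Userspace sketch (illustrative, not part of this file): the hci_inquiry()
 * path above is reached through the HCIINQUIRY ioctl on a raw HCI socket.
 * "sk" is an assumed, already-open AF_BLUETOOTH/BTPROTO_HCI socket.
 */
#if 0	/* example only */
	struct hci_inquiry_req *ir;
	char buf[sizeof(*ir) + 8 * sizeof(struct inquiry_info)];

	memset(buf, 0, sizeof(buf));
	ir = (struct hci_inquiry_req *) buf;
	ir->dev_id  = 0;			/* hci0 */
	ir->flags   = IREQ_CACHE_FLUSH;		/* drop stale cache entries */
	ir->lap[0]  = 0x33;			/* GIAC: 0x9e8b33 */
	ir->lap[1]  = 0x8b;
	ir->lap[2]  = 0x9e;
	ir->length  = 8;			/* inquiry length units */
	ir->num_rsp = 8;			/* room for 8 responses */

	if (ioctl(sk, HCIINQUIRY, buf) < 0)
		perror("HCIINQUIRY");
#endif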
cbed0ca1 2177static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2178{
1da177e4
LT
2179 int ret = 0;
2180
1da177e4
LT
2181 BT_DBG("%s %p", hdev->name, hdev);
2182
2183 hci_req_lock(hdev);
2184
94324962
JH
2185 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2186 ret = -ENODEV;
2187 goto done;
2188 }
2189
a5c8f270
MH
2190 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2191 /* Check for rfkill but allow the HCI setup stage to
2192 * proceed (which in itself doesn't cause any RF activity).
2193 */
2194 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2195 ret = -ERFKILL;
2196 goto done;
2197 }
2198
2199 /* Check for valid public address or a configured static
 2200 * random address, but let the HCI setup proceed to
2201 * be able to determine if there is a public address
2202 * or not.
2203 *
c6beca0e
MH
2204 * In case of user channel usage, it is not important
2205 * if a public address or static random address is
2206 * available.
2207 *
a5c8f270
MH
2208 * This check is only valid for BR/EDR controllers
2209 * since AMP controllers do not have an address.
2210 */
c6beca0e
MH
2211 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2212 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2213 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2214 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2215 ret = -EADDRNOTAVAIL;
2216 goto done;
2217 }
611b30f7
MH
2218 }
2219
1da177e4
LT
2220 if (test_bit(HCI_UP, &hdev->flags)) {
2221 ret = -EALREADY;
2222 goto done;
2223 }
2224
1da177e4
LT
2225 if (hdev->open(hdev)) {
2226 ret = -EIO;
2227 goto done;
2228 }
2229
f41c70c4
MH
2230 atomic_set(&hdev->cmd_cnt, 1);
2231 set_bit(HCI_INIT, &hdev->flags);
2232
2233 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2234 ret = hdev->setup(hdev);
2235
2236 if (!ret) {
fee746b0 2237 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
0736cfa8 2238 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2239 ret = __hci_init(hdev);
1da177e4
LT
2240 }
2241
f41c70c4
MH
2242 clear_bit(HCI_INIT, &hdev->flags);
2243
1da177e4
LT
2244 if (!ret) {
2245 hci_dev_hold(hdev);
d6bfd59c 2246 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2247 set_bit(HCI_UP, &hdev->flags);
2248 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2249 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 2250 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2251 hdev->dev_type == HCI_BREDR) {
09fd0de5 2252 hci_dev_lock(hdev);
744cf19e 2253 mgmt_powered(hdev, 1);
09fd0de5 2254 hci_dev_unlock(hdev);
56e5cb86 2255 }
8e87d142 2256 } else {
1da177e4 2257 /* Init failed, cleanup */
3eff45ea 2258 flush_work(&hdev->tx_work);
c347b765 2259 flush_work(&hdev->cmd_work);
b78752cc 2260 flush_work(&hdev->rx_work);
1da177e4
LT
2261
2262 skb_queue_purge(&hdev->cmd_q);
2263 skb_queue_purge(&hdev->rx_q);
2264
2265 if (hdev->flush)
2266 hdev->flush(hdev);
2267
2268 if (hdev->sent_cmd) {
2269 kfree_skb(hdev->sent_cmd);
2270 hdev->sent_cmd = NULL;
2271 }
2272
2273 hdev->close(hdev);
fee746b0 2274 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2275 }
2276
2277done:
2278 hci_req_unlock(hdev);
1da177e4
LT
2279 return ret;
2280}
2281
cbed0ca1
JH
2282/* ---- HCI ioctl helpers ---- */
2283
2284int hci_dev_open(__u16 dev)
2285{
2286 struct hci_dev *hdev;
2287 int err;
2288
2289 hdev = hci_dev_get(dev);
2290 if (!hdev)
2291 return -ENODEV;
2292
fee746b0
MH
2293 /* Devices that are marked for raw-only usage can only be powered
2294 * up as user channel. Trying to bring them up as normal devices
 2295 * will result in a failure. Only user channel operation is
2296 * possible.
2297 *
2298 * When this function is called for a user channel, the flag
2299 * HCI_USER_CHANNEL will be set first before attempting to
2300 * open the device.
2301 */
2302 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2303 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2304 err = -EOPNOTSUPP;
2305 goto done;
2306 }
2307
e1d08f40
JH
2308 /* We need to ensure that no other power on/off work is pending
2309 * before proceeding to call hci_dev_do_open. This is
2310 * particularly important if the setup procedure has not yet
2311 * completed.
2312 */
2313 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2314 cancel_delayed_work(&hdev->power_off);
2315
a5c8f270
MH
2316 /* After this call it is guaranteed that the setup procedure
2317 * has finished. This means that error conditions like RFKILL
2318 * or no valid public or static random address apply.
2319 */
e1d08f40
JH
2320 flush_workqueue(hdev->req_workqueue);
2321
cbed0ca1
JH
2322 err = hci_dev_do_open(hdev);
2323
fee746b0 2324done:
cbed0ca1 2325 hci_dev_put(hdev);
cbed0ca1
JH
2326 return err;
2327}
2328
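/* Userspace sketch (illustrative, not part of this file): hci_dev_open()
 * is reached from the HCIDEVUP ioctl, hci_dev_close() below from HCIDEVDOWN.
 * "sk" is an assumed open HCI socket.
 */
#if 0	/* example only */
	if (ioctl(sk, HCIDEVUP, 0) < 0 && errno != EALREADY)
		perror("HCIDEVUP");	/* bring up hci0 */
#endif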
1da177e4
LT
2329static int hci_dev_do_close(struct hci_dev *hdev)
2330{
2331 BT_DBG("%s %p", hdev->name, hdev);
2332
78c04c0b
VCG
2333 cancel_delayed_work(&hdev->power_off);
2334
1da177e4
LT
2335 hci_req_cancel(hdev, ENODEV);
2336 hci_req_lock(hdev);
2337
2338 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2339 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2340 hci_req_unlock(hdev);
2341 return 0;
2342 }
2343
3eff45ea
GP
2344 /* Flush RX and TX works */
2345 flush_work(&hdev->tx_work);
b78752cc 2346 flush_work(&hdev->rx_work);
1da177e4 2347
16ab91ab 2348 if (hdev->discov_timeout > 0) {
e0f9309f 2349 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2350 hdev->discov_timeout = 0;
5e5282bb 2351 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2352 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2353 }
2354
a8b2d5c2 2355 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2356 cancel_delayed_work(&hdev->service_cache);
2357
7ba8b4be 2358 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2359
2360 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2361 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2362
09fd0de5 2363 hci_dev_lock(hdev);
1f9b9a5d 2364 hci_inquiry_cache_flush(hdev);
1da177e4 2365 hci_conn_hash_flush(hdev);
6046dc3e 2366 hci_pend_le_conns_clear(hdev);
09fd0de5 2367 hci_dev_unlock(hdev);
1da177e4
LT
2368
2369 hci_notify(hdev, HCI_DEV_DOWN);
2370
2371 if (hdev->flush)
2372 hdev->flush(hdev);
2373
2374 /* Reset device */
2375 skb_queue_purge(&hdev->cmd_q);
2376 atomic_set(&hdev->cmd_cnt, 1);
fee746b0 2377 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
3a6afbd2 2378 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2379 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2380 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2381 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2382 clear_bit(HCI_INIT, &hdev->flags);
2383 }
2384
c347b765
GP
2385 /* flush cmd work */
2386 flush_work(&hdev->cmd_work);
1da177e4
LT
2387
2388 /* Drop queues */
2389 skb_queue_purge(&hdev->rx_q);
2390 skb_queue_purge(&hdev->cmd_q);
2391 skb_queue_purge(&hdev->raw_q);
2392
2393 /* Drop last sent command */
2394 if (hdev->sent_cmd) {
65cc2b49 2395 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2396 kfree_skb(hdev->sent_cmd);
2397 hdev->sent_cmd = NULL;
2398 }
2399
b6ddb638
JH
2400 kfree_skb(hdev->recv_evt);
2401 hdev->recv_evt = NULL;
2402
1da177e4
LT
2403 /* After this point our queues are empty
2404 * and no tasks are scheduled. */
2405 hdev->close(hdev);
2406
35b973c9 2407 /* Clear flags */
fee746b0 2408 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2409 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2410
93c311a0
MH
2411 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2412 if (hdev->dev_type == HCI_BREDR) {
2413 hci_dev_lock(hdev);
2414 mgmt_powered(hdev, 0);
2415 hci_dev_unlock(hdev);
2416 }
8ee56540 2417 }
5add6af8 2418
ced5c338 2419 /* Controller radio is available but is currently powered down */
536619e8 2420 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2421
e59fda8d 2422 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2423 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2424 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2425
1da177e4
LT
2426 hci_req_unlock(hdev);
2427
2428 hci_dev_put(hdev);
2429 return 0;
2430}
2431
2432int hci_dev_close(__u16 dev)
2433{
2434 struct hci_dev *hdev;
2435 int err;
2436
70f23020
AE
2437 hdev = hci_dev_get(dev);
2438 if (!hdev)
1da177e4 2439 return -ENODEV;
8ee56540 2440
0736cfa8
MH
2441 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2442 err = -EBUSY;
2443 goto done;
2444 }
2445
8ee56540
MH
2446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2448
1da177e4 2449 err = hci_dev_do_close(hdev);
8ee56540 2450
0736cfa8 2451done:
1da177e4
LT
2452 hci_dev_put(hdev);
2453 return err;
2454}
2455
2456int hci_dev_reset(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int ret = 0;
2460
70f23020
AE
2461 hdev = hci_dev_get(dev);
2462 if (!hdev)
1da177e4
LT
2463 return -ENODEV;
2464
2465 hci_req_lock(hdev);
1da177e4 2466
808a049e
MH
2467 if (!test_bit(HCI_UP, &hdev->flags)) {
2468 ret = -ENETDOWN;
1da177e4 2469 goto done;
808a049e 2470 }
1da177e4 2471
0736cfa8
MH
2472 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2473 ret = -EBUSY;
2474 goto done;
2475 }
2476
fee746b0
MH
2477 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2478 ret = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
1da177e4
LT
2482 /* Drop queues */
2483 skb_queue_purge(&hdev->rx_q);
2484 skb_queue_purge(&hdev->cmd_q);
2485
09fd0de5 2486 hci_dev_lock(hdev);
1f9b9a5d 2487 hci_inquiry_cache_flush(hdev);
1da177e4 2488 hci_conn_hash_flush(hdev);
09fd0de5 2489 hci_dev_unlock(hdev);
1da177e4
LT
2490
2491 if (hdev->flush)
2492 hdev->flush(hdev);
2493
8e87d142 2494 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2495 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2496
fee746b0 2497 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2498
2499done:
1da177e4
LT
2500 hci_req_unlock(hdev);
2501 hci_dev_put(hdev);
2502 return ret;
2503}
2504
2505int hci_dev_reset_stat(__u16 dev)
2506{
2507 struct hci_dev *hdev;
2508 int ret = 0;
2509
70f23020
AE
2510 hdev = hci_dev_get(dev);
2511 if (!hdev)
1da177e4
LT
2512 return -ENODEV;
2513
0736cfa8
MH
2514 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2515 ret = -EBUSY;
2516 goto done;
2517 }
2518
fee746b0
MH
2519 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2520 ret = -EOPNOTSUPP;
2521 goto done;
2522 }
2523
1da177e4
LT
2524 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2525
0736cfa8 2526done:
1da177e4 2527 hci_dev_put(hdev);
1da177e4
LT
2528 return ret;
2529}
2530
2531int hci_dev_cmd(unsigned int cmd, void __user *arg)
2532{
2533 struct hci_dev *hdev;
2534 struct hci_dev_req dr;
2535 int err = 0;
2536
2537 if (copy_from_user(&dr, arg, sizeof(dr)))
2538 return -EFAULT;
2539
70f23020
AE
2540 hdev = hci_dev_get(dr.dev_id);
2541 if (!hdev)
1da177e4
LT
2542 return -ENODEV;
2543
0736cfa8
MH
2544 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2545 err = -EBUSY;
2546 goto done;
2547 }
2548
fee746b0
MH
2549 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2550 err = -EOPNOTSUPP;
2551 goto done;
2552 }
2553
5b69bef5
MH
2554 if (hdev->dev_type != HCI_BREDR) {
2555 err = -EOPNOTSUPP;
2556 goto done;
2557 }
2558
56f87901
JH
2559 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2560 err = -EOPNOTSUPP;
2561 goto done;
2562 }
2563
1da177e4
LT
2564 switch (cmd) {
2565 case HCISETAUTH:
01178cd4
JH
2566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2567 HCI_INIT_TIMEOUT);
1da177e4
LT
2568 break;
2569
2570 case HCISETENCRYPT:
2571 if (!lmp_encrypt_capable(hdev)) {
2572 err = -EOPNOTSUPP;
2573 break;
2574 }
2575
2576 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2577 /* Auth must be enabled first */
01178cd4
JH
2578 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2579 HCI_INIT_TIMEOUT);
1da177e4
LT
2580 if (err)
2581 break;
2582 }
2583
01178cd4
JH
2584 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
1da177e4
LT
2586 break;
2587
2588 case HCISETSCAN:
01178cd4
JH
2589 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
1da177e4
LT
2591 break;
2592
1da177e4 2593 case HCISETLINKPOL:
01178cd4
JH
2594 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
1da177e4
LT
2596 break;
2597
2598 case HCISETLINKMODE:
e4e8e37c
MH
2599 hdev->link_mode = ((__u16) dr.dev_opt) &
2600 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2601 break;
2602
2603 case HCISETPTYPE:
2604 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2605 break;
2606
2607 case HCISETACLMTU:
e4e8e37c
MH
2608 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2609 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2610 break;
2611
2612 case HCISETSCOMTU:
e4e8e37c
MH
2613 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2614 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2615 break;
2616
2617 default:
2618 err = -EINVAL;
2619 break;
2620 }
e4e8e37c 2621
0736cfa8 2622done:
1da177e4
LT
2623 hci_dev_put(hdev);
2624 return err;
2625}
2626
2627int hci_get_dev_list(void __user *arg)
2628{
8035ded4 2629 struct hci_dev *hdev;
1da177e4
LT
2630 struct hci_dev_list_req *dl;
2631 struct hci_dev_req *dr;
1da177e4
LT
2632 int n = 0, size, err;
2633 __u16 dev_num;
2634
2635 if (get_user(dev_num, (__u16 __user *) arg))
2636 return -EFAULT;
2637
2638 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2639 return -EINVAL;
2640
2641 size = sizeof(*dl) + dev_num * sizeof(*dr);
2642
70f23020
AE
2643 dl = kzalloc(size, GFP_KERNEL);
2644 if (!dl)
1da177e4
LT
2645 return -ENOMEM;
2646
2647 dr = dl->dev_req;
2648
f20d09d5 2649 read_lock(&hci_dev_list_lock);
8035ded4 2650 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2652 cancel_delayed_work(&hdev->power_off);
c542a06c 2653
a8b2d5c2
JH
2654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2656
1da177e4
LT
2657 (dr + n)->dev_id = hdev->id;
2658 (dr + n)->dev_opt = hdev->flags;
c542a06c 2659
1da177e4
LT
2660 if (++n >= dev_num)
2661 break;
2662 }
f20d09d5 2663 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2664
2665 dl->dev_num = n;
2666 size = sizeof(*dl) + n * sizeof(*dr);
2667
2668 err = copy_to_user(arg, dl, size);
2669 kfree(dl);
2670
2671 return err ? -EFAULT : 0;
2672}
2673
2674int hci_get_dev_info(void __user *arg)
2675{
2676 struct hci_dev *hdev;
2677 struct hci_dev_info di;
2678 int err = 0;
2679
2680 if (copy_from_user(&di, arg, sizeof(di)))
2681 return -EFAULT;
2682
70f23020
AE
2683 hdev = hci_dev_get(di.dev_id);
2684 if (!hdev)
1da177e4
LT
2685 return -ENODEV;
2686
a8b2d5c2 2687 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2688 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2689
a8b2d5c2
JH
2690 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2691 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2692
1da177e4
LT
2693 strcpy(di.name, hdev->name);
2694 di.bdaddr = hdev->bdaddr;
60f2a3ed 2695 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2696 di.flags = hdev->flags;
2697 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2698 if (lmp_bredr_capable(hdev)) {
2699 di.acl_mtu = hdev->acl_mtu;
2700 di.acl_pkts = hdev->acl_pkts;
2701 di.sco_mtu = hdev->sco_mtu;
2702 di.sco_pkts = hdev->sco_pkts;
2703 } else {
2704 di.acl_mtu = hdev->le_mtu;
2705 di.acl_pkts = hdev->le_pkts;
2706 di.sco_mtu = 0;
2707 di.sco_pkts = 0;
2708 }
1da177e4
LT
2709 di.link_policy = hdev->link_policy;
2710 di.link_mode = hdev->link_mode;
2711
2712 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2713 memcpy(&di.features, &hdev->features, sizeof(di.features));
2714
2715 if (copy_to_user(arg, &di, sizeof(di)))
2716 err = -EFAULT;
2717
2718 hci_dev_put(hdev);
2719
2720 return err;
2721}
2722
2723/* ---- Interface to HCI drivers ---- */
2724
611b30f7
MH
2725static int hci_rfkill_set_block(void *data, bool blocked)
2726{
2727 struct hci_dev *hdev = data;
2728
2729 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2730
0736cfa8
MH
2731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2732 return -EBUSY;
2733
5e130367
JH
2734 if (blocked) {
2735 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2736 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2737 hci_dev_do_close(hdev);
5e130367
JH
2738 } else {
2739 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2740 }
611b30f7
MH
2741
2742 return 0;
2743}
2744
2745static const struct rfkill_ops hci_rfkill_ops = {
2746 .set_block = hci_rfkill_set_block,
2747};
2748
ab81cbf9
JH
2749static void hci_power_on(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2752 int err;
ab81cbf9
JH
2753
2754 BT_DBG("%s", hdev->name);
2755
cbed0ca1 2756 err = hci_dev_do_open(hdev);
96570ffc
JH
2757 if (err < 0) {
2758 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2759 return;
96570ffc 2760 }
ab81cbf9 2761
a5c8f270
MH
2762 /* During the HCI setup phase, a few error conditions are
2763 * ignored and they need to be checked now. If they are still
2764 * valid, it is important to turn the device back off.
2765 */
2766 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2767 (hdev->dev_type == HCI_BREDR &&
2768 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2769 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2770 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2771 hci_dev_do_close(hdev);
2772 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2773 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2774 HCI_AUTO_OFF_TIMEOUT);
bf543036 2775 }
ab81cbf9 2776
fee746b0
MH
2777 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2778 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2779 mgmt_index_added(hdev);
2780 }
ab81cbf9
JH
2781}
2782
2783static void hci_power_off(struct work_struct *work)
2784{
3243553f 2785 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2786 power_off.work);
ab81cbf9
JH
2787
2788 BT_DBG("%s", hdev->name);
2789
8ee56540 2790 hci_dev_do_close(hdev);
ab81cbf9
JH
2791}
2792
16ab91ab
JH
2793static void hci_discov_off(struct work_struct *work)
2794{
2795 struct hci_dev *hdev;
16ab91ab
JH
2796
2797 hdev = container_of(work, struct hci_dev, discov_off.work);
2798
2799 BT_DBG("%s", hdev->name);
2800
d1967ff8 2801 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2802}
2803
35f7498a 2804void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2805{
4821002c 2806 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2807
4821002c
JH
2808 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2809 list_del(&uuid->list);
2aeb9a1a
JH
2810 kfree(uuid);
2811 }
2aeb9a1a
JH
2812}
2813
35f7498a 2814void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2815{
2816 struct list_head *p, *n;
2817
2818 list_for_each_safe(p, n, &hdev->link_keys) {
2819 struct link_key *key;
2820
2821 key = list_entry(p, struct link_key, list);
2822
2823 list_del(p);
2824 kfree(key);
2825 }
55ed8ca1
JH
2826}
2827
35f7498a 2828void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2829{
2830 struct smp_ltk *k, *tmp;
2831
2832 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2833 list_del(&k->list);
2834 kfree(k);
2835 }
b899efaf
VCG
2836}
2837
970c4e46
JH
2838void hci_smp_irks_clear(struct hci_dev *hdev)
2839{
2840 struct smp_irk *k, *tmp;
2841
2842 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2843 list_del(&k->list);
2844 kfree(k);
2845 }
2846}
2847
55ed8ca1
JH
2848struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2849{
8035ded4 2850 struct link_key *k;
55ed8ca1 2851
8035ded4 2852 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2853 if (bacmp(bdaddr, &k->bdaddr) == 0)
2854 return k;
55ed8ca1
JH
2855
2856 return NULL;
2857}
2858
745c0ce3 2859static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2860 u8 key_type, u8 old_key_type)
d25e28ab
JH
2861{
2862 /* Legacy key */
2863 if (key_type < 0x03)
745c0ce3 2864 return true;
d25e28ab
JH
2865
2866 /* Debug keys are insecure so don't store them persistently */
2867 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2868 return false;
d25e28ab
JH
2869
2870 /* Changed combination key and there's no previous one */
2871 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2872 return false;
d25e28ab
JH
2873
2874 /* Security mode 3 case */
2875 if (!conn)
745c0ce3 2876 return true;
d25e28ab
JH
2877
 2878 /* Neither local nor remote side set no-bonding as a requirement */
2879 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2880 return true;
d25e28ab
JH
2881
2882 /* Local side had dedicated bonding as requirement */
2883 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2884 return true;
d25e28ab
JH
2885
2886 /* Remote side had dedicated bonding as requirement */
2887 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2888 return true;
d25e28ab
JH
2889
2890 /* If none of the above criteria match, then don't store the key
2891 * persistently */
745c0ce3 2892 return false;
d25e28ab
JH
2893}
2894
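/* Editorial summary (derived from the checks above, not in the original
 * source) of when a link key is stored persistently:
 *
 *   key_type < 0x03 (legacy key)                    -> store
 *   HCI_LK_DEBUG_COMBINATION                        -> discard
 *   changed combination key with no previous key    -> discard
 *   no connection (security mode 3)                 -> store
 *   both sides requested some form of bonding       -> store
 *   either side used dedicated bonding (0x02/0x03)  -> store
 *   anything else                                   -> discard
 */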
98a0b845
JH
2895static bool ltk_type_master(u8 type)
2896{
d97c9fb0 2897 return (type == SMP_LTK);
98a0b845
JH
2898}
2899
fe39c7b2 2900struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2901 bool master)
75d262c2 2902{
c9839a11 2903 struct smp_ltk *k;
75d262c2 2904
c9839a11 2905 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2906 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2907 continue;
2908
98a0b845
JH
2909 if (ltk_type_master(k->type) != master)
2910 continue;
2911
c9839a11 2912 return k;
75d262c2
VCG
2913 }
2914
2915 return NULL;
2916}
75d262c2 2917
c9839a11 2918struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2919 u8 addr_type, bool master)
75d262c2 2920{
c9839a11 2921 struct smp_ltk *k;
75d262c2 2922
c9839a11
VCG
2923 list_for_each_entry(k, &hdev->long_term_keys, list)
2924 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2925 bacmp(bdaddr, &k->bdaddr) == 0 &&
2926 ltk_type_master(k->type) == master)
75d262c2
VCG
2927 return k;
2928
2929 return NULL;
2930}
75d262c2 2931
970c4e46
JH
2932struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2933{
2934 struct smp_irk *irk;
2935
2936 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2937 if (!bacmp(&irk->rpa, rpa))
2938 return irk;
2939 }
2940
2941 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2942 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2943 bacpy(&irk->rpa, rpa);
2944 return irk;
2945 }
2946 }
2947
2948 return NULL;
2949}
2950
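/* Editorial note (not in the original source): per the Bluetooth SMP
 * specification a resolvable private address is prand (3 octets, top two
 * bits 01) followed by hash = ah(IRK, prand). smp_irk_matches() recomputes
 * that hash with each stored IRK; on a match the RPA is cached in irk->rpa
 * so the cheap bacmp() in the first loop resolves it next time.
 */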
2951struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 u8 addr_type)
2953{
2954 struct smp_irk *irk;
2955
6cfc9988
JH
2956 /* Identity Address must be public or static random */
2957 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2958 return NULL;
2959
970c4e46
JH
2960 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2961 if (addr_type == irk->addr_type &&
2962 bacmp(bdaddr, &irk->bdaddr) == 0)
2963 return irk;
2964 }
2965
2966 return NULL;
2967}
2968
567fa2aa 2969struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2970 bdaddr_t *bdaddr, u8 *val, u8 type,
2971 u8 pin_len, bool *persistent)
55ed8ca1
JH
2972{
2973 struct link_key *key, *old_key;
745c0ce3 2974 u8 old_key_type;
55ed8ca1
JH
2975
2976 old_key = hci_find_link_key(hdev, bdaddr);
2977 if (old_key) {
2978 old_key_type = old_key->type;
2979 key = old_key;
2980 } else {
12adcf3a 2981 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2982 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2983 if (!key)
567fa2aa 2984 return NULL;
55ed8ca1
JH
2985 list_add(&key->list, &hdev->link_keys);
2986 }
2987
6ed93dc6 2988 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2989
d25e28ab
JH
2990 /* Some buggy controller combinations generate a changed
2991 * combination key for legacy pairing even when there's no
2992 * previous key */
2993 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2994 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2995 type = HCI_LK_COMBINATION;
655fe6ec
JH
2996 if (conn)
2997 conn->key_type = type;
2998 }
d25e28ab 2999
55ed8ca1 3000 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3001 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3002 key->pin_len = pin_len;
3003
b6020ba0 3004 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3005 key->type = old_key_type;
4748fed2
JH
3006 else
3007 key->type = type;
3008
7652ff6a
JH
3009 if (persistent)
3010 *persistent = hci_persistent_key(hdev, conn, type,
3011 old_key_type);
55ed8ca1 3012
567fa2aa 3013 return key;
55ed8ca1
JH
3014}
3015
ca9142b8 3016struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3017 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3018 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3019{
c9839a11 3020 struct smp_ltk *key, *old_key;
98a0b845 3021 bool master = ltk_type_master(type);
75d262c2 3022
98a0b845 3023 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3024 if (old_key)
75d262c2 3025 key = old_key;
c9839a11 3026 else {
0a14ab41 3027 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3028 if (!key)
ca9142b8 3029 return NULL;
c9839a11 3030 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3031 }
3032
75d262c2 3033 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3034 key->bdaddr_type = addr_type;
3035 memcpy(key->val, tk, sizeof(key->val));
3036 key->authenticated = authenticated;
3037 key->ediv = ediv;
fe39c7b2 3038 key->rand = rand;
c9839a11
VCG
3039 key->enc_size = enc_size;
3040 key->type = type;
75d262c2 3041
ca9142b8 3042 return key;
75d262c2
VCG
3043}
3044
ca9142b8
JH
3045struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3046 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3047{
3048 struct smp_irk *irk;
3049
3050 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3051 if (!irk) {
3052 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3053 if (!irk)
ca9142b8 3054 return NULL;
970c4e46
JH
3055
3056 bacpy(&irk->bdaddr, bdaddr);
3057 irk->addr_type = addr_type;
3058
3059 list_add(&irk->list, &hdev->identity_resolving_keys);
3060 }
3061
3062 memcpy(irk->val, val, 16);
3063 bacpy(&irk->rpa, rpa);
3064
ca9142b8 3065 return irk;
970c4e46
JH
3066}
3067
55ed8ca1
JH
3068int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3069{
3070 struct link_key *key;
3071
3072 key = hci_find_link_key(hdev, bdaddr);
3073 if (!key)
3074 return -ENOENT;
3075
6ed93dc6 3076 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3077
3078 list_del(&key->list);
3079 kfree(key);
3080
3081 return 0;
3082}
3083
e0b2b27e 3084int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3085{
3086 struct smp_ltk *k, *tmp;
c51ffa0b 3087 int removed = 0;
b899efaf
VCG
3088
3089 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3090 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3091 continue;
3092
6ed93dc6 3093 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3094
3095 list_del(&k->list);
3096 kfree(k);
c51ffa0b 3097 removed++;
b899efaf
VCG
3098 }
3099
c51ffa0b 3100 return removed ? 0 : -ENOENT;
b899efaf
VCG
3101}
3102
a7ec7338
JH
3103void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3104{
3105 struct smp_irk *k, *tmp;
3106
668b7b19 3107 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3108 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3109 continue;
3110
3111 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3112
3113 list_del(&k->list);
3114 kfree(k);
3115 }
3116}
3117
6bd32326 3118/* HCI command timer function */
65cc2b49 3119static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3120{
65cc2b49
MH
3121 struct hci_dev *hdev = container_of(work, struct hci_dev,
3122 cmd_timer.work);
6bd32326 3123
bda4f23a
AE
3124 if (hdev->sent_cmd) {
3125 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3126 u16 opcode = __le16_to_cpu(sent->opcode);
3127
3128 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3129 } else {
3130 BT_ERR("%s command tx timeout", hdev->name);
3131 }
3132
6bd32326 3133 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3134 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3135}
3136
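/* Editorial note (not in the original source): after logging the timed-out
 * opcode, the command credit is forced back to 1 and cmd_work is requeued,
 * so the command queue can make progress even though the controller never
 * answered.
 */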
2763eda6 3137struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3138 bdaddr_t *bdaddr)
2763eda6
SJ
3139{
3140 struct oob_data *data;
3141
3142 list_for_each_entry(data, &hdev->remote_oob_data, list)
3143 if (bacmp(bdaddr, &data->bdaddr) == 0)
3144 return data;
3145
3146 return NULL;
3147}
3148
3149int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3150{
3151 struct oob_data *data;
3152
3153 data = hci_find_remote_oob_data(hdev, bdaddr);
3154 if (!data)
3155 return -ENOENT;
3156
6ed93dc6 3157 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3158
3159 list_del(&data->list);
3160 kfree(data);
3161
3162 return 0;
3163}
3164
35f7498a 3165void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3166{
3167 struct oob_data *data, *n;
3168
3169 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3170 list_del(&data->list);
3171 kfree(data);
3172 }
2763eda6
SJ
3173}
3174
0798872e
MH
3175int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3176 u8 *hash, u8 *randomizer)
2763eda6
SJ
3177{
3178 struct oob_data *data;
3179
3180 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3181 if (!data) {
0a14ab41 3182 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3183 if (!data)
3184 return -ENOMEM;
3185
3186 bacpy(&data->bdaddr, bdaddr);
3187 list_add(&data->list, &hdev->remote_oob_data);
3188 }
3189
519ca9d0
MH
3190 memcpy(data->hash192, hash, sizeof(data->hash192));
3191 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3192
0798872e
MH
3193 memset(data->hash256, 0, sizeof(data->hash256));
3194 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3195
3196 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3197
3198 return 0;
3199}
3200
3201int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3202 u8 *hash192, u8 *randomizer192,
3203 u8 *hash256, u8 *randomizer256)
3204{
3205 struct oob_data *data;
3206
3207 data = hci_find_remote_oob_data(hdev, bdaddr);
3208 if (!data) {
0a14ab41 3209 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3210 if (!data)
3211 return -ENOMEM;
3212
3213 bacpy(&data->bdaddr, bdaddr);
3214 list_add(&data->list, &hdev->remote_oob_data);
3215 }
3216
3217 memcpy(data->hash192, hash192, sizeof(data->hash192));
3218 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3219
3220 memcpy(data->hash256, hash256, sizeof(data->hash256));
3221 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3222
6ed93dc6 3223 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3224
3225 return 0;
3226}
3227
b9ee0a78
MH
3228struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3229 bdaddr_t *bdaddr, u8 type)
b2a66aad 3230{
8035ded4 3231 struct bdaddr_list *b;
b2a66aad 3232
b9ee0a78
MH
3233 list_for_each_entry(b, &hdev->blacklist, list) {
3234 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3235 return b;
b9ee0a78 3236 }
b2a66aad
AJ
3237
3238 return NULL;
3239}
3240
c9507490 3241static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3242{
3243 struct list_head *p, *n;
3244
3245 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3246 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3247
3248 list_del(p);
3249 kfree(b);
3250 }
b2a66aad
AJ
3251}
3252
88c1fe4b 3253int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3254{
3255 struct bdaddr_list *entry;
b2a66aad 3256
b9ee0a78 3257 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3258 return -EBADF;
3259
b9ee0a78 3260 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3261 return -EEXIST;
b2a66aad
AJ
3262
3263 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3264 if (!entry)
3265 return -ENOMEM;
b2a66aad
AJ
3266
3267 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3268 entry->bdaddr_type = type;
b2a66aad
AJ
3269
3270 list_add(&entry->list, &hdev->blacklist);
3271
88c1fe4b 3272 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3273}
3274
88c1fe4b 3275int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3276{
3277 struct bdaddr_list *entry;
b2a66aad 3278
35f7498a
JH
3279 if (!bacmp(bdaddr, BDADDR_ANY)) {
3280 hci_blacklist_clear(hdev);
3281 return 0;
3282 }
b2a66aad 3283
b9ee0a78 3284 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3285 if (!entry)
5e762444 3286 return -ENOENT;
b2a66aad
AJ
3287
3288 list_del(&entry->list);
3289 kfree(entry);
3290
88c1fe4b 3291 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3292}
3293
d2ab0ac1
MH
3294struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3295 bdaddr_t *bdaddr, u8 type)
3296{
3297 struct bdaddr_list *b;
3298
3299 list_for_each_entry(b, &hdev->le_white_list, list) {
3300 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3301 return b;
3302 }
3303
3304 return NULL;
3305}
3306
3307void hci_white_list_clear(struct hci_dev *hdev)
3308{
3309 struct list_head *p, *n;
3310
3311 list_for_each_safe(p, n, &hdev->le_white_list) {
3312 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3313
3314 list_del(p);
3315 kfree(b);
3316 }
3317}
3318
3319int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3320{
3321 struct bdaddr_list *entry;
3322
3323 if (!bacmp(bdaddr, BDADDR_ANY))
3324 return -EBADF;
3325
3326 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3327 if (!entry)
3328 return -ENOMEM;
3329
3330 bacpy(&entry->bdaddr, bdaddr);
3331 entry->bdaddr_type = type;
3332
3333 list_add(&entry->list, &hdev->le_white_list);
3334
3335 return 0;
3336}
3337
3338int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3339{
3340 struct bdaddr_list *entry;
3341
3342 if (!bacmp(bdaddr, BDADDR_ANY))
3343 return -EBADF;
3344
3345 entry = hci_white_list_lookup(hdev, bdaddr, type);
3346 if (!entry)
3347 return -ENOENT;
3348
3349 list_del(&entry->list);
3350 kfree(entry);
3351
3352 return 0;
3353}
3354
15819a70
AG
3355/* This function requires the caller holds hdev->lock */
3356struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3357 bdaddr_t *addr, u8 addr_type)
3358{
3359 struct hci_conn_params *params;
3360
3361 list_for_each_entry(params, &hdev->le_conn_params, list) {
3362 if (bacmp(&params->addr, addr) == 0 &&
3363 params->addr_type == addr_type) {
3364 return params;
3365 }
3366 }
3367
3368 return NULL;
3369}
3370
cef952ce
AG
3371static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3372{
3373 struct hci_conn *conn;
3374
3375 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3376 if (!conn)
3377 return false;
3378
3379 if (conn->dst_type != type)
3380 return false;
3381
3382 if (conn->state != BT_CONNECTED)
3383 return false;
3384
3385 return true;
3386}
3387
a9b0a04c
AG
3388static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3389{
3390 if (addr_type == ADDR_LE_DEV_PUBLIC)
3391 return true;
3392
3393 /* Check for Random Static address type */
3394 if ((addr->b[5] & 0xc0) == 0xc0)
3395 return true;
3396
3397 return false;
3398}
3399
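/* Worked example (editorial, not in the original source): for random
 * addresses the two most significant bits of b[5] (bdaddr_t is stored
 * little-endian, so b[5] is the most significant octet) encode the
 * sub-type:
 *
 *   11xxxxxx -> static random          (identity address)
 *   01xxxxxx -> resolvable private     (not an identity)
 *   00xxxxxx -> non-resolvable private (not an identity)
 *
 * e.g. C0:11:22:33:44:55 has b[5] = 0xc0, so (b[5] & 0xc0) == 0xc0 and
 * the check above accepts it.
 */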
4b10966f
MH
3400/* This function requires the caller holds hdev->lock */
3401struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3402 bdaddr_t *addr, u8 addr_type)
3403{
3404 struct bdaddr_list *entry;
3405
3406 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3407 if (bacmp(&entry->bdaddr, addr) == 0 &&
3408 entry->bdaddr_type == addr_type)
3409 return entry;
3410 }
3411
3412 return NULL;
3413}
3414
3415/* This function requires the caller holds hdev->lock */
3416void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3417{
3418 struct bdaddr_list *entry;
3419
3420 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3421 if (entry)
3422 goto done;
3423
3424 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3425 if (!entry) {
3426 BT_ERR("Out of memory");
3427 return;
3428 }
3429
3430 bacpy(&entry->bdaddr, addr);
3431 entry->bdaddr_type = addr_type;
3432
3433 list_add(&entry->list, &hdev->pend_le_conns);
3434
3435 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3436
3437done:
3438 hci_update_background_scan(hdev);
3439}
3440
3441/* This function requires the caller holds hdev->lock */
3442void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3443{
3444 struct bdaddr_list *entry;
3445
3446 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3447 if (!entry)
3448 goto done;
3449
3450 list_del(&entry->list);
3451 kfree(entry);
3452
3453 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3454
3455done:
3456 hci_update_background_scan(hdev);
3457}
3458
3459/* This function requires the caller holds hdev->lock */
3460void hci_pend_le_conns_clear(struct hci_dev *hdev)
3461{
3462 struct bdaddr_list *entry, *tmp;
3463
3464 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3465 list_del(&entry->list);
3466 kfree(entry);
3467 }
3468
3469 BT_DBG("All LE pending connections cleared");
1c1697c0
MH
3470
3471 hci_update_background_scan(hdev);
4b10966f
MH
3472}
3473
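/* Editorial note (not in the original source): each mutation of the
 * pend_le_conns list above ends with hci_update_background_scan(), so the
 * controller's passive background scan is started or stopped to match the
 * current set of pending LE connections.
 */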
15819a70 3474/* This function requires the caller holds hdev->lock */
bf5b3c8b
MH
3475int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3476{
3477 struct hci_conn_params *params;
3478
3479 if (!is_identity_address(addr, addr_type))
3480 return -EINVAL;
3481
3482 params = hci_conn_params_lookup(hdev, addr, addr_type);
3483 if (params)
3484 return 0;
3485
3486 params = kzalloc(sizeof(*params), GFP_KERNEL);
3487 if (!params) {
3488 BT_ERR("Out of memory");
3489 return -ENOMEM;
3490 }
3491
3492 bacpy(&params->addr, addr);
3493 params->addr_type = addr_type;
3494
3495 list_add(&params->list, &hdev->le_conn_params);
3496
3497 params->conn_min_interval = hdev->le_conn_min_interval;
3498 params->conn_max_interval = hdev->le_conn_max_interval;
3499 params->conn_latency = hdev->le_conn_latency;
3500 params->supervision_timeout = hdev->le_supv_timeout;
3501 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3502
3503 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3504
3505 return 0;
3506}
3507
3508/* This function requires the caller holds hdev->lock */
3509int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
a9b0a04c
AG
3510 u8 auto_connect, u16 conn_min_interval,
3511 u16 conn_max_interval)
15819a70
AG
3512{
3513 struct hci_conn_params *params;
3514
a9b0a04c
AG
3515 if (!is_identity_address(addr, addr_type))
3516 return -EINVAL;
3517
15819a70 3518 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce
AG
3519 if (params)
3520 goto update;
15819a70
AG
3521
3522 params = kzalloc(sizeof(*params), GFP_KERNEL);
3523 if (!params) {
3524 BT_ERR("Out of memory");
a9b0a04c 3525 return -ENOMEM;
15819a70
AG
3526 }
3527
3528 bacpy(&params->addr, addr);
3529 params->addr_type = addr_type;
cef952ce
AG
3530
3531 list_add(&params->list, &hdev->le_conn_params);
3532
3533update:
15819a70
AG
3534 params->conn_min_interval = conn_min_interval;
3535 params->conn_max_interval = conn_max_interval;
04fb7d90
MH
3536 params->conn_latency = hdev->le_conn_latency;
3537 params->supervision_timeout = hdev->le_supv_timeout;
9fcb18ef 3538 params->auto_connect = auto_connect;
15819a70 3539
cef952ce
AG
3540 switch (auto_connect) {
3541 case HCI_AUTO_CONN_DISABLED:
3542 case HCI_AUTO_CONN_LINK_LOSS:
3543 hci_pend_le_conn_del(hdev, addr, addr_type);
3544 break;
3545 case HCI_AUTO_CONN_ALWAYS:
3546 if (!is_connected(hdev, addr, addr_type))
3547 hci_pend_le_conn_add(hdev, addr, addr_type);
3548 break;
3549 }
15819a70 3550
9fcb18ef
AG
3551 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3552 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3553 conn_min_interval, conn_max_interval);
a9b0a04c
AG
3554
3555 return 0;
15819a70
AG
3556}
3557
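/* Illustrative sketch (editorial, not part of the original file): marking
 * a peer for automatic reconnection with the helper above. "peer" is an
 * assumed identity address; the intervals are simply the controller
 * defaults set up in hci_alloc_dev().
 */
#if 0	/* example only; caller must hold hdev->lock */
	hci_conn_params_set(hdev, &peer, ADDR_LE_DEV_PUBLIC,
			    HCI_AUTO_CONN_ALWAYS,
			    hdev->le_conn_min_interval,
			    hdev->le_conn_max_interval);
#endif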
3558/* This function requires the caller holds hdev->lock */
3559void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3560{
3561 struct hci_conn_params *params;
3562
3563 params = hci_conn_params_lookup(hdev, addr, addr_type);
3564 if (!params)
3565 return;
3566
cef952ce
AG
3567 hci_pend_le_conn_del(hdev, addr, addr_type);
3568
15819a70
AG
3569 list_del(&params->list);
3570 kfree(params);
3571
3572 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3573}
3574
3575/* This function requires the caller holds hdev->lock */
3576void hci_conn_params_clear(struct hci_dev *hdev)
3577{
3578 struct hci_conn_params *params, *tmp;
3579
3580 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3581 list_del(&params->list);
3582 kfree(params);
3583 }
3584
1089b67d
MH
3585 hci_pend_le_conns_clear(hdev);
3586
15819a70
AG
3587 BT_DBG("All LE connection parameters were removed");
3588}
3589
4c87eaab 3590static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3591{
4c87eaab
AG
3592 if (status) {
3593 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3594
4c87eaab
AG
3595 hci_dev_lock(hdev);
3596 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3597 hci_dev_unlock(hdev);
3598 return;
3599 }
7ba8b4be
AG
3600}
3601
4c87eaab 3602static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3603{
4c87eaab
AG
3604 /* General inquiry access code (GIAC) */
3605 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3606 struct hci_request req;
3607 struct hci_cp_inquiry cp;
7ba8b4be
AG
3608 int err;
3609
4c87eaab
AG
3610 if (status) {
3611 BT_ERR("Failed to disable LE scanning: status %d", status);
3612 return;
3613 }
7ba8b4be 3614
4c87eaab
AG
3615 switch (hdev->discovery.type) {
3616 case DISCOV_TYPE_LE:
3617 hci_dev_lock(hdev);
3618 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3619 hci_dev_unlock(hdev);
3620 break;
7ba8b4be 3621
4c87eaab
AG
3622 case DISCOV_TYPE_INTERLEAVED:
3623 hci_req_init(&req, hdev);
7ba8b4be 3624
4c87eaab
AG
3625 memset(&cp, 0, sizeof(cp));
3626 memcpy(&cp.lap, lap, sizeof(cp.lap));
3627 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3628 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3629
4c87eaab 3630 hci_dev_lock(hdev);
7dbfac1d 3631
4c87eaab 3632 hci_inquiry_cache_flush(hdev);
7dbfac1d 3633
4c87eaab
AG
3634 err = hci_req_run(&req, inquiry_complete);
3635 if (err) {
3636 BT_ERR("Inquiry request failed: err %d", err);
3637 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3638 }
7dbfac1d 3639
4c87eaab
AG
3640 hci_dev_unlock(hdev);
3641 break;
7dbfac1d 3642 }
7dbfac1d
AG
3643}
3644
7ba8b4be
AG
3645static void le_scan_disable_work(struct work_struct *work)
3646{
3647 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3648 le_scan_disable.work);
4c87eaab
AG
3649 struct hci_request req;
3650 int err;
7ba8b4be
AG
3651
3652 BT_DBG("%s", hdev->name);
3653
4c87eaab 3654 hci_req_init(&req, hdev);
28b75a89 3655
b1efcc28 3656 hci_req_add_le_scan_disable(&req);
28b75a89 3657
4c87eaab
AG
3658 err = hci_req_run(&req, le_scan_disable_work_complete);
3659 if (err)
3660 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3661}
3662
8d97250e
JH
3663static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3664{
3665 struct hci_dev *hdev = req->hdev;
3666
3667 /* If we're advertising or initiating an LE connection we can't
3668 * go ahead and change the random address at this time. This is
3669 * because the eventual initiator address used for the
3670 * subsequently created connection will be undefined (some
3671 * controllers use the new address and others the one we had
3672 * when the operation started).
3673 *
3674 * In this kind of scenario skip the update and let the random
3675 * address be updated at the next cycle.
3676 */
3677 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3678 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3679 BT_DBG("Deferring random address update");
3680 return;
3681 }
3682
3683 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3684}
3685
94b1fc92
MH
3686int hci_update_random_address(struct hci_request *req, bool require_privacy,
3687 u8 *own_addr_type)
ebd3a747
JH
3688{
3689 struct hci_dev *hdev = req->hdev;
3690 int err;
3691
3692 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3693 * current RPA has expired or there is something else than
3694 * the current RPA in use, then generate a new one.
ebd3a747
JH
3695 */
3696 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3697 int to;
3698
3699 *own_addr_type = ADDR_LE_DEV_RANDOM;
3700
3701 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3702 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3703 return 0;
3704
2b5224dc 3705 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3706 if (err < 0) {
3707 BT_ERR("%s failed to generate new RPA", hdev->name);
3708 return err;
3709 }
3710
8d97250e 3711 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3712
3713 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3714 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3715
3716 return 0;
94b1fc92
MH
3717 }
3718
3719 /* In case of required privacy without resolvable private address,
3720 * use an unresolvable private address. This is useful for active
3721 * scanning and non-connectable advertising.
3722 */
3723 if (require_privacy) {
3724 bdaddr_t urpa;
3725
3726 get_random_bytes(&urpa, 6);
3727 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3728
3729 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3730 set_random_addr(req, &urpa);
94b1fc92 3731 return 0;
ebd3a747
JH
3732 }
3733
3734 /* If forcing static address is in use or there is no public
 3735 * address, use the static address as the random address (but skip
 3736 * the HCI command if the current random address is already the
 3737 * static one).
3738 */
111902f7 3739 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3740 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3741 *own_addr_type = ADDR_LE_DEV_RANDOM;
3742 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3743 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3744 &hdev->static_addr);
3745 return 0;
3746 }
3747
3748 /* Neither privacy nor static address is being used so use a
3749 * public address.
3750 */
3751 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3752
3753 return 0;
3754}
3755
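/* Editorial summary (derived from the branches above, not in the original
 * source) of how the own address type is chosen:
 *
 *   HCI_PRIVACY set                    -> resolvable private address (RPA),
 *                                         regenerated when expired
 *   require_privacy, no RPA available  -> fresh non-resolvable private
 *                                         address (top two bits cleared)
 *   forced static address or no public -> configured static random address
 *   otherwise                          -> public address
 */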
a1f4c318
JH
3756/* Copy the Identity Address of the controller.
3757 *
3758 * If the controller has a public BD_ADDR, then by default use that one.
3759 * If this is a LE only controller without a public address, default to
3760 * the static random address.
3761 *
3762 * For debugging purposes it is possible to force controllers with a
3763 * public address to use the static random address instead.
3764 */
3765void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3766 u8 *bdaddr_type)
3767{
111902f7 3768 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3769 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3770 bacpy(bdaddr, &hdev->static_addr);
3771 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3772 } else {
3773 bacpy(bdaddr, &hdev->bdaddr);
3774 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3775 }
3776}
3777
9be0dab7
DH
3778/* Alloc HCI device */
3779struct hci_dev *hci_alloc_dev(void)
3780{
3781 struct hci_dev *hdev;
3782
3783 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3784 if (!hdev)
3785 return NULL;
3786
b1b813d4
DH
3787 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3788 hdev->esco_type = (ESCO_HV1);
3789 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3790 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3791 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3792 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3793 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3794
b1b813d4
DH
3795 hdev->sniff_max_interval = 800;
3796 hdev->sniff_min_interval = 80;
3797
3f959d46 3798 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3799 hdev->le_scan_interval = 0x0060;
3800 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3801 hdev->le_conn_min_interval = 0x0028;
3802 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3803 hdev->le_conn_latency = 0x0000;
3804 hdev->le_supv_timeout = 0x002a;
bef64738 3805
d6bfd59c 3806 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3807 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3808 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3809 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3810
b1b813d4
DH
3811 mutex_init(&hdev->lock);
3812 mutex_init(&hdev->req_lock);
3813
3814 INIT_LIST_HEAD(&hdev->mgmt_pending);
3815 INIT_LIST_HEAD(&hdev->blacklist);
3816 INIT_LIST_HEAD(&hdev->uuids);
3817 INIT_LIST_HEAD(&hdev->link_keys);
3818 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3819 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3820 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3821 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3822 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3823 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3824 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3825
3826 INIT_WORK(&hdev->rx_work, hci_rx_work);
3827 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3828 INIT_WORK(&hdev->tx_work, hci_tx_work);
3829 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3830
b1b813d4
DH
3831 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3832 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3833 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3834
b1b813d4
DH
3835 skb_queue_head_init(&hdev->rx_q);
3836 skb_queue_head_init(&hdev->cmd_q);
3837 skb_queue_head_init(&hdev->raw_q);
3838
3839 init_waitqueue_head(&hdev->req_wait_q);
3840
65cc2b49 3841 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3842
b1b813d4
DH
3843 hci_init_sysfs(hdev);
3844 discovery_init(hdev);
9be0dab7
DH
3845
3846 return hdev;
3847}
3848EXPORT_SYMBOL(hci_alloc_dev);
3849
3850/* Free HCI device */
3851void hci_free_dev(struct hci_dev *hdev)
3852{
9be0dab7
DH
 3853 /* will be freed via device release */
3854 put_device(&hdev->dev);
3855}
3856EXPORT_SYMBOL(hci_free_dev);
3857
1da177e4
LT
3858/* Register HCI device */
3859int hci_register_dev(struct hci_dev *hdev)
3860{
b1b813d4 3861 int id, error;
1da177e4 3862
010666a1 3863 if (!hdev->open || !hdev->close)
1da177e4
LT
3864 return -EINVAL;
3865
08add513
MM
3866 /* Do not allow HCI_AMP devices to register at index 0,
3867 * so the index can be used as the AMP controller ID.
3868 */
3df92b31
SL
3869 switch (hdev->dev_type) {
3870 case HCI_BREDR:
3871 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3872 break;
3873 case HCI_AMP:
3874 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3875 break;
3876 default:
3877 return -EINVAL;
1da177e4 3878 }
8e87d142 3879
3df92b31
SL
3880 if (id < 0)
3881 return id;
3882
1da177e4
LT
3883 sprintf(hdev->name, "hci%d", id);
3884 hdev->id = id;
2d8b3a11
AE
3885
3886 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3887
d8537548
KC
3888 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3889 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3890 if (!hdev->workqueue) {
3891 error = -ENOMEM;
3892 goto err;
3893 }
f48fd9c8 3894
d8537548
KC
3895 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3896 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3897 if (!hdev->req_workqueue) {
3898 destroy_workqueue(hdev->workqueue);
3899 error = -ENOMEM;
3900 goto err;
3901 }
3902
0153e2ec
MH
3903 if (!IS_ERR_OR_NULL(bt_debugfs))
3904 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3905
bdc3e0f1
MH
3906 dev_set_name(&hdev->dev, "%s", hdev->name);
3907
99780a7b
JH
3908 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3909 CRYPTO_ALG_ASYNC);
3910 if (IS_ERR(hdev->tfm_aes)) {
3911 BT_ERR("Unable to create crypto context");
3912 error = PTR_ERR(hdev->tfm_aes);
3913 hdev->tfm_aes = NULL;
3914 goto err_wqueue;
3915 }
3916
bdc3e0f1 3917 error = device_add(&hdev->dev);
33ca954d 3918 if (error < 0)
99780a7b 3919 goto err_tfm;
1da177e4 3920
611b30f7 3921 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3922 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3923 hdev);
611b30f7
MH
3924 if (hdev->rfkill) {
3925 if (rfkill_register(hdev->rfkill) < 0) {
3926 rfkill_destroy(hdev->rfkill);
3927 hdev->rfkill = NULL;
3928 }
3929 }
3930
5e130367
JH
3931 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3932 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3933
a8b2d5c2 3934 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3935 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3936
01cd3404 3937 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3938 /* Assume BR/EDR support until proven otherwise (such as
 3939 * through reading supported features during init).
3940 */
3941 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3942 }
ce2be9ac 3943
fcee3377
GP
3944 write_lock(&hci_dev_list_lock);
3945 list_add(&hdev->list, &hci_dev_list);
3946 write_unlock(&hci_dev_list_lock);
3947
fee746b0
MH
3948 /* Devices that are marked for raw-only usage need to set
 3949 * the HCI_RAW flag to indicate that only the user channel is
3950 * supported.
3951 */
3952 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3953 set_bit(HCI_RAW, &hdev->flags);
3954
1da177e4 3955 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3956 hci_dev_hold(hdev);
1da177e4 3957
19202573 3958 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3959
1da177e4 3960 return id;
f48fd9c8 3961
99780a7b
JH
3962err_tfm:
3963 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3964err_wqueue:
3965 destroy_workqueue(hdev->workqueue);
6ead1bbc 3966 destroy_workqueue(hdev->req_workqueue);
33ca954d 3967err:
3df92b31 3968 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3969
33ca954d 3970 return error;
1da177e4
LT
3971}
3972EXPORT_SYMBOL(hci_register_dev);
3973
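/* Editor's example (not part of hci_core.c): minimal sketch of a driver
 * bringing a controller up through the API above. The my_* names are
 * hypothetical; teardown would use hci_unregister_dev() followed by
 * hci_free_dev().
 */
static int my_open(struct hci_dev *hdev)
{
        return 0;
}

static int my_close(struct hci_dev *hdev)
{
        return 0;
}

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* A real transport would queue the frame to the hardware here;
         * this stub just consumes it.
         */
        kfree_skb(skb);
        return 0;
}

static int my_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = my_open;
        hdev->close = my_close;
        hdev->send = my_send;

        /* hci_register_dev() fails unless open and close are set */
        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}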
3974/* Unregister HCI device */
59735631 3975void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3976{
3df92b31 3977 int i, id;
ef222013 3978
c13854ce 3979 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3980
94324962
JH
3981 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3982
3df92b31
SL
3983 id = hdev->id;
3984
f20d09d5 3985 write_lock(&hci_dev_list_lock);
1da177e4 3986 list_del(&hdev->list);
f20d09d5 3987 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3988
3989 hci_dev_do_close(hdev);
3990
cd4c5391 3991 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3992 kfree_skb(hdev->reassembly[i]);
3993
b9b5ef18
GP
3994 cancel_work_sync(&hdev->power_on);
3995
ab81cbf9 3996 if (!test_bit(HCI_INIT, &hdev->flags) &&
fee746b0
MH
3997 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3998 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
09fd0de5 3999 hci_dev_lock(hdev);
744cf19e 4000 mgmt_index_removed(hdev);
09fd0de5 4001 hci_dev_unlock(hdev);
56e5cb86 4002 }
ab81cbf9 4003
2e58ef3e
JH
4004 /* mgmt_index_removed should take care of emptying the
4005 * pending list */
4006 BUG_ON(!list_empty(&hdev->mgmt_pending));
4007
1da177e4
LT
4008 hci_notify(hdev, HCI_DEV_UNREG);
4009
611b30f7
MH
4010 if (hdev->rfkill) {
4011 rfkill_unregister(hdev->rfkill);
4012 rfkill_destroy(hdev->rfkill);
4013 }
4014
99780a7b
JH
4015 if (hdev->tfm_aes)
4016 crypto_free_blkcipher(hdev->tfm_aes);
4017
bdc3e0f1 4018 device_del(&hdev->dev);
147e2d59 4019
0153e2ec
MH
4020 debugfs_remove_recursive(hdev->debugfs);
4021
f48fd9c8 4022 destroy_workqueue(hdev->workqueue);
6ead1bbc 4023 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4024
09fd0de5 4025 hci_dev_lock(hdev);
e2e0cacb 4026 hci_blacklist_clear(hdev);
2aeb9a1a 4027 hci_uuids_clear(hdev);
55ed8ca1 4028 hci_link_keys_clear(hdev);
b899efaf 4029 hci_smp_ltks_clear(hdev);
970c4e46 4030 hci_smp_irks_clear(hdev);
2763eda6 4031 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4032 hci_white_list_clear(hdev);
15819a70 4033 hci_conn_params_clear(hdev);
09fd0de5 4034 hci_dev_unlock(hdev);
e2e0cacb 4035
dc946bd8 4036 hci_dev_put(hdev);
3df92b31
SL
4037
4038 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4039}
4040EXPORT_SYMBOL(hci_unregister_dev);
4041
4042/* Suspend HCI device */
4043int hci_suspend_dev(struct hci_dev *hdev)
4044{
4045 hci_notify(hdev, HCI_DEV_SUSPEND);
4046 return 0;
4047}
4048EXPORT_SYMBOL(hci_suspend_dev);
4049
4050/* Resume HCI device */
4051int hci_resume_dev(struct hci_dev *hdev)
4052{
4053 hci_notify(hdev, HCI_DEV_RESUME);
4054 return 0;
4055}
4056EXPORT_SYMBOL(hci_resume_dev);
4057
76bca880 4058/* Receive frame from HCI drivers */
e1a26170 4059int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4060{
76bca880 4061 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4062 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4063 kfree_skb(skb);
4064 return -ENXIO;
4065 }
4066
d82603c6 4067 /* Incoming skb */
76bca880
MH
4068 bt_cb(skb)->incoming = 1;
4069
4070 /* Time stamp */
4071 __net_timestamp(skb);
4072
76bca880 4073 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4074 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4075
76bca880
MH
4076 return 0;
4077}
4078EXPORT_SYMBOL(hci_recv_frame);
4079
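/* Editor's example (not part of hci_core.c): how a driver that receives
 * complete frames would hand them to the core. The function and
 * parameter names are hypothetical.
 */
static int my_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

        /* hci_recv_frame() takes ownership of the skb in all cases */
        return hci_recv_frame(hdev, skb);
}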
33e882a5 4080static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4081 int count, __u8 index)
33e882a5
SS
4082{
4083 int len = 0;
4084 int hlen = 0;
4085 int remain = count;
4086 struct sk_buff *skb;
4087 struct bt_skb_cb *scb;
4088
4089 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4090 index >= NUM_REASSEMBLY)
33e882a5
SS
4091 return -EILSEQ;
4092
4093 skb = hdev->reassembly[index];
4094
4095 if (!skb) {
4096 switch (type) {
4097 case HCI_ACLDATA_PKT:
4098 len = HCI_MAX_FRAME_SIZE;
4099 hlen = HCI_ACL_HDR_SIZE;
4100 break;
4101 case HCI_EVENT_PKT:
4102 len = HCI_MAX_EVENT_SIZE;
4103 hlen = HCI_EVENT_HDR_SIZE;
4104 break;
4105 case HCI_SCODATA_PKT:
4106 len = HCI_MAX_SCO_SIZE;
4107 hlen = HCI_SCO_HDR_SIZE;
4108 break;
4109 }
4110
1e429f38 4111 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4112 if (!skb)
4113 return -ENOMEM;
4114
4115 scb = (void *) skb->cb;
4116 scb->expect = hlen;
4117 scb->pkt_type = type;
4118
33e882a5
SS
4119 hdev->reassembly[index] = skb;
4120 }
4121
4122 while (count) {
4123 scb = (void *) skb->cb;
89bb46d0 4124 len = min_t(uint, scb->expect, count);
33e882a5
SS
4125
4126 memcpy(skb_put(skb, len), data, len);
4127
4128 count -= len;
4129 data += len;
4130 scb->expect -= len;
4131 remain = count;
4132
4133 switch (type) {
4134 case HCI_EVENT_PKT:
4135 if (skb->len == HCI_EVENT_HDR_SIZE) {
4136 struct hci_event_hdr *h = hci_event_hdr(skb);
4137 scb->expect = h->plen;
4138
4139 if (skb_tailroom(skb) < scb->expect) {
4140 kfree_skb(skb);
4141 hdev->reassembly[index] = NULL;
4142 return -ENOMEM;
4143 }
4144 }
4145 break;
4146
4147 case HCI_ACLDATA_PKT:
4148 if (skb->len == HCI_ACL_HDR_SIZE) {
4149 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4150 scb->expect = __le16_to_cpu(h->dlen);
4151
4152 if (skb_tailroom(skb) < scb->expect) {
4153 kfree_skb(skb);
4154 hdev->reassembly[index] = NULL;
4155 return -ENOMEM;
4156 }
4157 }
4158 break;
4159
4160 case HCI_SCODATA_PKT:
4161 if (skb->len == HCI_SCO_HDR_SIZE) {
4162 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4163 scb->expect = h->dlen;
4164
4165 if (skb_tailroom(skb) < scb->expect) {
4166 kfree_skb(skb);
4167 hdev->reassembly[index] = NULL;
4168 return -ENOMEM;
4169 }
4170 }
4171 break;
4172 }
4173
4174 if (scb->expect == 0) {
4175 /* Complete frame */
4176
4177 bt_cb(skb)->pkt_type = type;
e1a26170 4178 hci_recv_frame(hdev, skb);
33e882a5
SS
4179
4180 hdev->reassembly[index] = NULL;
4181 return remain;
4182 }
4183 }
4184
4185 return remain;
4186}
4187
ef222013
MH
4188int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4189{
f39a3c06
SS
4190 int rem = 0;
4191
ef222013
MH
4192 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4193 return -EILSEQ;
4194
da5f6c37 4195 while (count) {
1e429f38 4196 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4197 if (rem < 0)
4198 return rem;
ef222013 4199
f39a3c06
SS
4200 data += (count - rem);
4201 count = rem;
f81c6224 4202 }
ef222013 4203
f39a3c06 4204 return rem;
ef222013
MH
4205}
4206EXPORT_SYMBOL(hci_recv_fragment);
4207
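/* Editor's example (not part of hci_core.c): a transport that delivers
 * data in arbitrary chunks, with the packet type known out of band, can
 * feed the bytes through hci_recv_fragment(); hci_reassembly() above
 * buffers them until a full frame is available. my_rx_chunk() is a
 * hypothetical name.
 */
static void my_rx_chunk(struct hci_dev *hdev, int type, void *data, int count)
{
        int err = hci_recv_fragment(hdev, type, data, count);

        if (err < 0)
                BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
}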
99811510
SS
4208#define STREAM_REASSEMBLY 0
4209
4210int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4211{
4212 int type;
4213 int rem = 0;
4214
da5f6c37 4215 while (count) {
99811510
SS
4216 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4217
4218 if (!skb) {
4219 struct { char type; } *pkt;
4220
4221 /* Start of the frame */
4222 pkt = data;
4223 type = pkt->type;
4224
4225 data++;
4226 count--;
4227 } else
4228 type = bt_cb(skb)->pkt_type;
4229
1e429f38 4230 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4231 STREAM_REASSEMBLY);
99811510
SS
4232 if (rem < 0)
4233 return rem;
4234
4235 data += (count - rem);
4236 count = rem;
f81c6224 4237 }
99811510
SS
4238
4239 return rem;
4240}
4241EXPORT_SYMBOL(hci_recv_stream_fragment);
4242
1da177e4
LT
4243/* ---- Interface to upper protocols ---- */
4244
1da177e4
LT
4245int hci_register_cb(struct hci_cb *cb)
4246{
4247 BT_DBG("%p name %s", cb, cb->name);
4248
f20d09d5 4249 write_lock(&hci_cb_list_lock);
1da177e4 4250 list_add(&cb->list, &hci_cb_list);
f20d09d5 4251 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4252
4253 return 0;
4254}
4255EXPORT_SYMBOL(hci_register_cb);
4256
4257int hci_unregister_cb(struct hci_cb *cb)
4258{
4259 BT_DBG("%p name %s", cb, cb->name);
4260
f20d09d5 4261 write_lock(&hci_cb_list_lock);
1da177e4 4262 list_del(&cb->list);
f20d09d5 4263 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4264
4265 return 0;
4266}
4267EXPORT_SYMBOL(hci_unregister_cb);
4268
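/* Editor's example (not part of hci_core.c): sketch of how an upper
 * protocol registers callbacks, modelled on what L2CAP does. The
 * "my_proto" name and my_security_cfm() handler are hypothetical.
 */
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
               encrypt);
}

static struct hci_cb my_proto_cb = {
        .name           = "my_proto",
        .security_cfm   = my_security_cfm,
};

/* Call hci_register_cb(&my_proto_cb) on module init and
 * hci_unregister_cb(&my_proto_cb) on module exit.
 */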
51086991 4269static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4270{
0d48d939 4271 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4272
cd82e61c
MH
4273 /* Time stamp */
4274 __net_timestamp(skb);
1da177e4 4275
cd82e61c
MH
4276 /* Send copy to monitor */
4277 hci_send_to_monitor(hdev, skb);
4278
4279 if (atomic_read(&hdev->promisc)) {
4280 /* Send copy to the sockets */
470fe1b5 4281 hci_send_to_sock(hdev, skb);
1da177e4
LT
4282 }
4283
4284 /* Get rid of skb owner, prior to sending to the driver. */
4285 skb_orphan(skb);
4286
7bd8f09f 4287 if (hdev->send(hdev, skb) < 0)
51086991 4288 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4289}
4290
3119ae95
JH
4291void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4292{
4293 skb_queue_head_init(&req->cmd_q);
4294 req->hdev = hdev;
5d73e034 4295 req->err = 0;
3119ae95
JH
4296}
4297
4298int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4299{
4300 struct hci_dev *hdev = req->hdev;
4301 struct sk_buff *skb;
4302 unsigned long flags;
4303
4304 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4305
5d73e034
AG
 4306 /* If an error occurred during request building, remove all HCI
4307 * commands queued on the HCI request queue.
4308 */
4309 if (req->err) {
4310 skb_queue_purge(&req->cmd_q);
4311 return req->err;
4312 }
4313
3119ae95
JH
4314 /* Do not allow empty requests */
4315 if (skb_queue_empty(&req->cmd_q))
382b0c39 4316 return -ENODATA;
3119ae95
JH
4317
4318 skb = skb_peek_tail(&req->cmd_q);
4319 bt_cb(skb)->req.complete = complete;
4320
4321 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4322 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4323 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4324
4325 queue_work(hdev->workqueue, &hdev->cmd_work);
4326
4327 return 0;
4328}
4329
1ca3a9d0 4330static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4331 u32 plen, const void *param)
1da177e4
LT
4332{
4333 int len = HCI_COMMAND_HDR_SIZE + plen;
4334 struct hci_command_hdr *hdr;
4335 struct sk_buff *skb;
4336
1da177e4 4337 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4338 if (!skb)
4339 return NULL;
1da177e4
LT
4340
4341 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4342 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4343 hdr->plen = plen;
4344
4345 if (plen)
4346 memcpy(skb_put(skb, plen), param, plen);
4347
4348 BT_DBG("skb len %d", skb->len);
4349
0d48d939 4350 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4351
1ca3a9d0
JH
4352 return skb;
4353}
4354
4355/* Send HCI command */
07dc93dd
JH
4356int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4357 const void *param)
1ca3a9d0
JH
4358{
4359 struct sk_buff *skb;
4360
4361 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4362
4363 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4364 if (!skb) {
4365 BT_ERR("%s no memory for command", hdev->name);
4366 return -ENOMEM;
4367 }
4368
11714b3d
JH
 4369 /* Stand-alone HCI commands must be flagged as
4370 * single-command requests.
4371 */
4372 bt_cb(skb)->req.start = true;
4373
1da177e4 4374 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4375 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4376
4377 return 0;
4378}
1da177e4 4379
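/* Editor's example (not part of hci_core.c): queueing a single command,
 * here Write Scan Enable, in the same way the mgmt code does. The
 * function name my_enable_scans() is hypothetical.
 */
static int my_enable_scans(struct hci_dev *hdev)
{
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
                            &scan);
}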
71c76a17 4380/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4381void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4382 const void *param, u8 event)
71c76a17
JH
4383{
4384 struct hci_dev *hdev = req->hdev;
4385 struct sk_buff *skb;
4386
4387 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4388
34739c1e
AG
 4389 /* If an error occurred during request building, there is no point in
4390 * queueing the HCI command. We can simply return.
4391 */
4392 if (req->err)
4393 return;
4394
71c76a17
JH
4395 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4396 if (!skb) {
5d73e034
AG
4397 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4398 hdev->name, opcode);
4399 req->err = -ENOMEM;
e348fe6b 4400 return;
71c76a17
JH
4401 }
4402
4403 if (skb_queue_empty(&req->cmd_q))
4404 bt_cb(skb)->req.start = true;
4405
02350a72
JH
4406 bt_cb(skb)->req.event = event;
4407
71c76a17 4408 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4409}
4410
07dc93dd
JH
4411void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4412 const void *param)
02350a72
JH
4413{
4414 hci_req_add_ev(req, opcode, plen, param, 0);
4415}
4416
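/* Editor's example (not part of hci_core.c): building and running an
 * asynchronous request from the primitives above; the my_* names are
 * hypothetical. The completion callback fires once, after the last
 * queued command completes.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_run_request(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        return hci_req_run(&req, my_req_complete);
}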
1da177e4 4417/* Get data from the previously sent command */
a9de9248 4418void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4419{
4420 struct hci_command_hdr *hdr;
4421
4422 if (!hdev->sent_cmd)
4423 return NULL;
4424
4425 hdr = (void *) hdev->sent_cmd->data;
4426
a9de9248 4427 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4428 return NULL;
4429
f0e09510 4430 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4431
4432 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4433}
4434
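/* Editor's example (not part of hci_core.c): event handlers use
 * hci_sent_cmd_data() to recover the parameters of the command that a
 * Command Complete event refers to; this hypothetical handler checks
 * what Write Scan Enable was asked to do.
 */
static void my_cc_write_scan_enable(struct hci_dev *hdev)
{
        u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

        if (!sent)
                return;

        BT_DBG("%s requested scan mode 0x%2.2x", hdev->name, *sent);
}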
4435/* Send ACL data */
4436static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4437{
4438 struct hci_acl_hdr *hdr;
4439 int len = skb->len;
4440
badff6d0
ACM
4441 skb_push(skb, HCI_ACL_HDR_SIZE);
4442 skb_reset_transport_header(skb);
9c70220b 4443 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4444 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4445 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4446}
4447
ee22be7e 4448static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4449 struct sk_buff *skb, __u16 flags)
1da177e4 4450{
ee22be7e 4451 struct hci_conn *conn = chan->conn;
1da177e4
LT
4452 struct hci_dev *hdev = conn->hdev;
4453 struct sk_buff *list;
4454
087bfd99
GP
4455 skb->len = skb_headlen(skb);
4456 skb->data_len = 0;
4457
4458 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4459
4460 switch (hdev->dev_type) {
4461 case HCI_BREDR:
4462 hci_add_acl_hdr(skb, conn->handle, flags);
4463 break;
4464 case HCI_AMP:
4465 hci_add_acl_hdr(skb, chan->handle, flags);
4466 break;
4467 default:
4468 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4469 return;
4470 }
087bfd99 4471
70f23020
AE
4472 list = skb_shinfo(skb)->frag_list;
4473 if (!list) {
1da177e4
LT
 4474 /* Non-fragmented */
4475 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4476
73d80deb 4477 skb_queue_tail(queue, skb);
1da177e4
LT
4478 } else {
4479 /* Fragmented */
4480 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4481
4482 skb_shinfo(skb)->frag_list = NULL;
4483
4484 /* Queue all fragments atomically */
af3e6359 4485 spin_lock(&queue->lock);
1da177e4 4486
73d80deb 4487 __skb_queue_tail(queue, skb);
e702112f
AE
4488
4489 flags &= ~ACL_START;
4490 flags |= ACL_CONT;
1da177e4
LT
4491 do {
4492 skb = list; list = list->next;
8e87d142 4493
0d48d939 4494 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4495 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4496
4497 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4498
73d80deb 4499 __skb_queue_tail(queue, skb);
1da177e4
LT
4500 } while (list);
4501
af3e6359 4502 spin_unlock(&queue->lock);
1da177e4 4503 }
73d80deb
LAD
4504}
4505
4506void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4507{
ee22be7e 4508 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4509
f0e09510 4510 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4511
ee22be7e 4512 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4513
3eff45ea 4514 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4515}
1da177e4
LT
4516
4517/* Send SCO data */
0d861d8b 4518void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4519{
4520 struct hci_dev *hdev = conn->hdev;
4521 struct hci_sco_hdr hdr;
4522
4523 BT_DBG("%s len %d", hdev->name, skb->len);
4524
aca3192c 4525 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4526 hdr.dlen = skb->len;
4527
badff6d0
ACM
4528 skb_push(skb, HCI_SCO_HDR_SIZE);
4529 skb_reset_transport_header(skb);
9c70220b 4530 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4531
0d48d939 4532 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4533
1da177e4 4534 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4535 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4536}
1da177e4
LT
4537
4538/* ---- HCI TX task (outgoing data) ---- */
4539
4540/* HCI Connection scheduler */
6039aa73
GP
4541static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4542 int *quote)
1da177e4
LT
4543{
4544 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4545 struct hci_conn *conn = NULL, *c;
abc5de8f 4546 unsigned int num = 0, min = ~0;
1da177e4 4547
8e87d142 4548 /* We don't have to lock the device here. Connections are always
1da177e4 4549 * added and removed with the TX task disabled. */
bf4c6325
GP
4550
4551 rcu_read_lock();
4552
4553 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4554 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4555 continue;
769be974
MH
4556
4557 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4558 continue;
4559
1da177e4
LT
4560 num++;
4561
4562 if (c->sent < min) {
4563 min = c->sent;
4564 conn = c;
4565 }
52087a79
LAD
4566
4567 if (hci_conn_num(hdev, type) == num)
4568 break;
1da177e4
LT
4569 }
4570
bf4c6325
GP
4571 rcu_read_unlock();
4572
1da177e4 4573 if (conn) {
6ed58ec5
VT
4574 int cnt, q;
4575
4576 switch (conn->type) {
4577 case ACL_LINK:
4578 cnt = hdev->acl_cnt;
4579 break;
4580 case SCO_LINK:
4581 case ESCO_LINK:
4582 cnt = hdev->sco_cnt;
4583 break;
4584 case LE_LINK:
4585 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4586 break;
4587 default:
4588 cnt = 0;
4589 BT_ERR("Unknown link type");
4590 }
4591
4592 q = cnt / num;
1da177e4
LT
4593 *quote = q ? q : 1;
4594 } else
4595 *quote = 0;
4596
4597 BT_DBG("conn %p quote %d", conn, *quote);
4598 return conn;
4599}
4600
6039aa73 4601static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4602{
4603 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4604 struct hci_conn *c;
1da177e4 4605
bae1f5d9 4606 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4607
bf4c6325
GP
4608 rcu_read_lock();
4609
1da177e4 4610 /* Kill stalled connections */
bf4c6325 4611 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4612 if (c->type == type && c->sent) {
6ed93dc6
AE
4613 BT_ERR("%s killing stalled connection %pMR",
4614 hdev->name, &c->dst);
bed71748 4615 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4616 }
4617 }
bf4c6325
GP
4618
4619 rcu_read_unlock();
1da177e4
LT
4620}
4621
6039aa73
GP
4622static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4623 int *quote)
1da177e4 4624{
73d80deb
LAD
4625 struct hci_conn_hash *h = &hdev->conn_hash;
4626 struct hci_chan *chan = NULL;
abc5de8f 4627 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4628 struct hci_conn *conn;
73d80deb
LAD
4629 int cnt, q, conn_num = 0;
4630
4631 BT_DBG("%s", hdev->name);
4632
bf4c6325
GP
4633 rcu_read_lock();
4634
4635 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4636 struct hci_chan *tmp;
4637
4638 if (conn->type != type)
4639 continue;
4640
4641 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4642 continue;
4643
4644 conn_num++;
4645
8192edef 4646 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4647 struct sk_buff *skb;
4648
4649 if (skb_queue_empty(&tmp->data_q))
4650 continue;
4651
4652 skb = skb_peek(&tmp->data_q);
4653 if (skb->priority < cur_prio)
4654 continue;
4655
4656 if (skb->priority > cur_prio) {
4657 num = 0;
4658 min = ~0;
4659 cur_prio = skb->priority;
4660 }
4661
4662 num++;
4663
4664 if (conn->sent < min) {
4665 min = conn->sent;
4666 chan = tmp;
4667 }
4668 }
4669
4670 if (hci_conn_num(hdev, type) == conn_num)
4671 break;
4672 }
4673
bf4c6325
GP
4674 rcu_read_unlock();
4675
73d80deb
LAD
4676 if (!chan)
4677 return NULL;
4678
4679 switch (chan->conn->type) {
4680 case ACL_LINK:
4681 cnt = hdev->acl_cnt;
4682 break;
bd1eb66b
AE
4683 case AMP_LINK:
4684 cnt = hdev->block_cnt;
4685 break;
73d80deb
LAD
4686 case SCO_LINK:
4687 case ESCO_LINK:
4688 cnt = hdev->sco_cnt;
4689 break;
4690 case LE_LINK:
4691 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4692 break;
4693 default:
4694 cnt = 0;
4695 BT_ERR("Unknown link type");
4696 }
4697
4698 q = cnt / num;
4699 *quote = q ? q : 1;
4700 BT_DBG("chan %p quote %d", chan, *quote);
4701 return chan;
4702}
4703
02b20f0b
LAD
4704static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4705{
4706 struct hci_conn_hash *h = &hdev->conn_hash;
4707 struct hci_conn *conn;
4708 int num = 0;
4709
4710 BT_DBG("%s", hdev->name);
4711
bf4c6325
GP
4712 rcu_read_lock();
4713
4714 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4715 struct hci_chan *chan;
4716
4717 if (conn->type != type)
4718 continue;
4719
4720 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4721 continue;
4722
4723 num++;
4724
8192edef 4725 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4726 struct sk_buff *skb;
4727
4728 if (chan->sent) {
4729 chan->sent = 0;
4730 continue;
4731 }
4732
4733 if (skb_queue_empty(&chan->data_q))
4734 continue;
4735
4736 skb = skb_peek(&chan->data_q);
4737 if (skb->priority >= HCI_PRIO_MAX - 1)
4738 continue;
4739
4740 skb->priority = HCI_PRIO_MAX - 1;
4741
4742 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4743 skb->priority);
02b20f0b
LAD
4744 }
4745
4746 if (hci_conn_num(hdev, type) == num)
4747 break;
4748 }
bf4c6325
GP
4749
4750 rcu_read_unlock();
4751
02b20f0b
LAD
4752}
4753
b71d385a
AE
4754static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4755{
4756 /* Calculate count of blocks used by this packet */
4757 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4758}
4759
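/* Editor's note: worked example for __get_blocks() above. With a
 * block_len of 224 bytes and an ACL packet of skb->len = 676 (672
 * payload bytes after the 4-byte ACL header), the packet occupies
 * DIV_ROUND_UP(672, 224) = 3 controller buffer blocks.
 */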
6039aa73 4760static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4761{
fee746b0 4762 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
1da177e4
LT
 4763 /* ACL tx timeout must be longer than the maximum
 4764 * link supervision timeout (40.9 seconds) */
63d2bc1b 4765 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4766 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4767 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4768 }
63d2bc1b 4769}
1da177e4 4770
6039aa73 4771static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4772{
4773 unsigned int cnt = hdev->acl_cnt;
4774 struct hci_chan *chan;
4775 struct sk_buff *skb;
4776 int quote;
4777
4778 __check_timeout(hdev, cnt);
04837f64 4779
73d80deb 4780 while (hdev->acl_cnt &&
a8c5fb1a 4781 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4782 u32 priority = (skb_peek(&chan->data_q))->priority;
4783 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4784 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4785 skb->len, skb->priority);
73d80deb 4786
ec1cce24
LAD
4787 /* Stop if priority has changed */
4788 if (skb->priority < priority)
4789 break;
4790
4791 skb = skb_dequeue(&chan->data_q);
4792
73d80deb 4793 hci_conn_enter_active_mode(chan->conn,
04124681 4794 bt_cb(skb)->force_active);
04837f64 4795
57d17d70 4796 hci_send_frame(hdev, skb);
1da177e4
LT
4797 hdev->acl_last_tx = jiffies;
4798
4799 hdev->acl_cnt--;
73d80deb
LAD
4800 chan->sent++;
4801 chan->conn->sent++;
1da177e4
LT
4802 }
4803 }
02b20f0b
LAD
4804
4805 if (cnt != hdev->acl_cnt)
4806 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4807}
4808
6039aa73 4809static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4810{
63d2bc1b 4811 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4812 struct hci_chan *chan;
4813 struct sk_buff *skb;
4814 int quote;
bd1eb66b 4815 u8 type;
b71d385a 4816
63d2bc1b 4817 __check_timeout(hdev, cnt);
b71d385a 4818
bd1eb66b
AE
4819 BT_DBG("%s", hdev->name);
4820
4821 if (hdev->dev_type == HCI_AMP)
4822 type = AMP_LINK;
4823 else
4824 type = ACL_LINK;
4825
b71d385a 4826 while (hdev->block_cnt > 0 &&
bd1eb66b 4827 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4828 u32 priority = (skb_peek(&chan->data_q))->priority;
4829 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4830 int blocks;
4831
4832 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4833 skb->len, skb->priority);
b71d385a
AE
4834
4835 /* Stop if priority has changed */
4836 if (skb->priority < priority)
4837 break;
4838
4839 skb = skb_dequeue(&chan->data_q);
4840
4841 blocks = __get_blocks(hdev, skb);
4842 if (blocks > hdev->block_cnt)
4843 return;
4844
4845 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4846 bt_cb(skb)->force_active);
b71d385a 4847
57d17d70 4848 hci_send_frame(hdev, skb);
b71d385a
AE
4849 hdev->acl_last_tx = jiffies;
4850
4851 hdev->block_cnt -= blocks;
4852 quote -= blocks;
4853
4854 chan->sent += blocks;
4855 chan->conn->sent += blocks;
4856 }
4857 }
4858
4859 if (cnt != hdev->block_cnt)
bd1eb66b 4860 hci_prio_recalculate(hdev, type);
b71d385a
AE
4861}
4862
6039aa73 4863static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4864{
4865 BT_DBG("%s", hdev->name);
4866
bd1eb66b
AE
4867 /* No ACL link over BR/EDR controller */
4868 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4869 return;
4870
4871 /* No AMP link over AMP controller */
4872 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4873 return;
4874
4875 switch (hdev->flow_ctl_mode) {
4876 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4877 hci_sched_acl_pkt(hdev);
4878 break;
4879
4880 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4881 hci_sched_acl_blk(hdev);
4882 break;
4883 }
4884}
4885
1da177e4 4886/* Schedule SCO */
6039aa73 4887static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4888{
4889 struct hci_conn *conn;
4890 struct sk_buff *skb;
4891 int quote;
4892
4893 BT_DBG("%s", hdev->name);
4894
52087a79
LAD
4895 if (!hci_conn_num(hdev, SCO_LINK))
4896 return;
4897
1da177e4
LT
4898 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4899 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4900 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4901 hci_send_frame(hdev, skb);
1da177e4
LT
4902
4903 conn->sent++;
4904 if (conn->sent == ~0)
4905 conn->sent = 0;
4906 }
4907 }
4908}
4909
6039aa73 4910static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4911{
4912 struct hci_conn *conn;
4913 struct sk_buff *skb;
4914 int quote;
4915
4916 BT_DBG("%s", hdev->name);
4917
52087a79
LAD
4918 if (!hci_conn_num(hdev, ESCO_LINK))
4919 return;
4920
8fc9ced3
GP
4921 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4922 &quote))) {
b6a0dc82
MH
4923 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4924 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4925 hci_send_frame(hdev, skb);
b6a0dc82
MH
4926
4927 conn->sent++;
4928 if (conn->sent == ~0)
4929 conn->sent = 0;
4930 }
4931 }
4932}
4933
6039aa73 4934static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4935{
73d80deb 4936 struct hci_chan *chan;
6ed58ec5 4937 struct sk_buff *skb;
02b20f0b 4938 int quote, cnt, tmp;
6ed58ec5
VT
4939
4940 BT_DBG("%s", hdev->name);
4941
52087a79
LAD
4942 if (!hci_conn_num(hdev, LE_LINK))
4943 return;
4944
fee746b0 4945 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
6ed58ec5
VT
 4946 /* LE tx timeout must be longer than the maximum
 4947 * link supervision timeout (40.9 seconds) */
bae1f5d9 4948 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4949 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4950 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4951 }
4952
4953 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4954 tmp = cnt;
73d80deb 4955 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4956 u32 priority = (skb_peek(&chan->data_q))->priority;
4957 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4958 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4959 skb->len, skb->priority);
6ed58ec5 4960
ec1cce24
LAD
4961 /* Stop if priority has changed */
4962 if (skb->priority < priority)
4963 break;
4964
4965 skb = skb_dequeue(&chan->data_q);
4966
57d17d70 4967 hci_send_frame(hdev, skb);
6ed58ec5
VT
4968 hdev->le_last_tx = jiffies;
4969
4970 cnt--;
73d80deb
LAD
4971 chan->sent++;
4972 chan->conn->sent++;
6ed58ec5
VT
4973 }
4974 }
73d80deb 4975
6ed58ec5
VT
4976 if (hdev->le_pkts)
4977 hdev->le_cnt = cnt;
4978 else
4979 hdev->acl_cnt = cnt;
02b20f0b
LAD
4980
4981 if (cnt != tmp)
4982 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4983}
4984
3eff45ea 4985static void hci_tx_work(struct work_struct *work)
1da177e4 4986{
3eff45ea 4987 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4988 struct sk_buff *skb;
4989
6ed58ec5 4990 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4991 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4992
52de599e
MH
4993 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4994 /* Schedule queues and send stuff to HCI driver */
4995 hci_sched_acl(hdev);
4996 hci_sched_sco(hdev);
4997 hci_sched_esco(hdev);
4998 hci_sched_le(hdev);
4999 }
6ed58ec5 5000
1da177e4
LT
5001 /* Send next queued raw (unknown type) packet */
5002 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5003 hci_send_frame(hdev, skb);
1da177e4
LT
5004}
5005
25985edc 5006/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5007
5008/* ACL data packet */
6039aa73 5009static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5010{
5011 struct hci_acl_hdr *hdr = (void *) skb->data;
5012 struct hci_conn *conn;
5013 __u16 handle, flags;
5014
5015 skb_pull(skb, HCI_ACL_HDR_SIZE);
5016
5017 handle = __le16_to_cpu(hdr->handle);
5018 flags = hci_flags(handle);
5019 handle = hci_handle(handle);
5020
f0e09510 5021 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5022 handle, flags);
1da177e4
LT
5023
5024 hdev->stat.acl_rx++;
5025
5026 hci_dev_lock(hdev);
5027 conn = hci_conn_hash_lookup_handle(hdev, handle);
5028 hci_dev_unlock(hdev);
8e87d142 5029
1da177e4 5030 if (conn) {
65983fc7 5031 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5032
1da177e4 5033 /* Send to upper protocol */
686ebf28
UF
5034 l2cap_recv_acldata(conn, skb, flags);
5035 return;
1da177e4 5036 } else {
8e87d142 5037 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5038 hdev->name, handle);
1da177e4
LT
5039 }
5040
5041 kfree_skb(skb);
5042}
5043
5044/* SCO data packet */
6039aa73 5045static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5046{
5047 struct hci_sco_hdr *hdr = (void *) skb->data;
5048 struct hci_conn *conn;
5049 __u16 handle;
5050
5051 skb_pull(skb, HCI_SCO_HDR_SIZE);
5052
5053 handle = __le16_to_cpu(hdr->handle);
5054
f0e09510 5055 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5056
5057 hdev->stat.sco_rx++;
5058
5059 hci_dev_lock(hdev);
5060 conn = hci_conn_hash_lookup_handle(hdev, handle);
5061 hci_dev_unlock(hdev);
5062
5063 if (conn) {
1da177e4 5064 /* Send to upper protocol */
686ebf28
UF
5065 sco_recv_scodata(conn, skb);
5066 return;
1da177e4 5067 } else {
8e87d142 5068 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5069 hdev->name, handle);
1da177e4
LT
5070 }
5071
5072 kfree_skb(skb);
5073}
5074
9238f36a
JH
5075static bool hci_req_is_complete(struct hci_dev *hdev)
5076{
5077 struct sk_buff *skb;
5078
5079 skb = skb_peek(&hdev->cmd_q);
5080 if (!skb)
5081 return true;
5082
5083 return bt_cb(skb)->req.start;
5084}
5085
42c6b129
JH
5086static void hci_resend_last(struct hci_dev *hdev)
5087{
5088 struct hci_command_hdr *sent;
5089 struct sk_buff *skb;
5090 u16 opcode;
5091
5092 if (!hdev->sent_cmd)
5093 return;
5094
5095 sent = (void *) hdev->sent_cmd->data;
5096 opcode = __le16_to_cpu(sent->opcode);
5097 if (opcode == HCI_OP_RESET)
5098 return;
5099
5100 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5101 if (!skb)
5102 return;
5103
5104 skb_queue_head(&hdev->cmd_q, skb);
5105 queue_work(hdev->workqueue, &hdev->cmd_work);
5106}
5107
9238f36a
JH
5108void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5109{
5110 hci_req_complete_t req_complete = NULL;
5111 struct sk_buff *skb;
5112 unsigned long flags;
5113
5114 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5115
42c6b129
JH
5116 /* If the completed command doesn't match the last one that was
 5117 * sent, we need to do special handling of it.
9238f36a 5118 */
42c6b129
JH
5119 if (!hci_sent_cmd_data(hdev, opcode)) {
 5120 /* Some CSR-based controllers generate a spontaneous
 5121 * reset complete event during init, and any pending
5122 * command will never be completed. In such a case we
5123 * need to resend whatever was the last sent
5124 * command.
5125 */
5126 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5127 hci_resend_last(hdev);
5128
9238f36a 5129 return;
42c6b129 5130 }
9238f36a
JH
5131
 5132 /* If the command succeeded and there are still more commands in
 5133 * this request, the request is not yet complete.
5134 */
5135 if (!status && !hci_req_is_complete(hdev))
5136 return;
5137
 5138 /* If this was the last command in a request, the complete
5139 * callback would be found in hdev->sent_cmd instead of the
5140 * command queue (hdev->cmd_q).
5141 */
5142 if (hdev->sent_cmd) {
5143 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5144
5145 if (req_complete) {
5146 /* We must set the complete callback to NULL to
5147 * avoid calling the callback more than once if
5148 * this function gets called again.
5149 */
5150 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5151
9238f36a 5152 goto call_complete;
53e21fbc 5153 }
9238f36a
JH
5154 }
5155
5156 /* Remove all pending commands belonging to this request */
5157 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5158 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5159 if (bt_cb(skb)->req.start) {
5160 __skb_queue_head(&hdev->cmd_q, skb);
5161 break;
5162 }
5163
5164 req_complete = bt_cb(skb)->req.complete;
5165 kfree_skb(skb);
5166 }
5167 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5168
5169call_complete:
5170 if (req_complete)
5171 req_complete(hdev, status);
5172}
5173
b78752cc 5174static void hci_rx_work(struct work_struct *work)
1da177e4 5175{
b78752cc 5176 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5177 struct sk_buff *skb;
5178
5179 BT_DBG("%s", hdev->name);
5180
1da177e4 5181 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5182 /* Send copy to monitor */
5183 hci_send_to_monitor(hdev, skb);
5184
1da177e4
LT
5185 if (atomic_read(&hdev->promisc)) {
5186 /* Send copy to the sockets */
470fe1b5 5187 hci_send_to_sock(hdev, skb);
1da177e4
LT
5188 }
5189
fee746b0 5190 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5191 kfree_skb(skb);
5192 continue;
5193 }
5194
5195 if (test_bit(HCI_INIT, &hdev->flags)) {
 5196 /* Don't process data packets in this state. */
0d48d939 5197 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5198 case HCI_ACLDATA_PKT:
5199 case HCI_SCODATA_PKT:
5200 kfree_skb(skb);
5201 continue;
3ff50b79 5202 }
1da177e4
LT
5203 }
5204
5205 /* Process frame */
0d48d939 5206 switch (bt_cb(skb)->pkt_type) {
1da177e4 5207 case HCI_EVENT_PKT:
b78752cc 5208 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5209 hci_event_packet(hdev, skb);
5210 break;
5211
5212 case HCI_ACLDATA_PKT:
5213 BT_DBG("%s ACL data packet", hdev->name);
5214 hci_acldata_packet(hdev, skb);
5215 break;
5216
5217 case HCI_SCODATA_PKT:
5218 BT_DBG("%s SCO data packet", hdev->name);
5219 hci_scodata_packet(hdev, skb);
5220 break;
5221
5222 default:
5223 kfree_skb(skb);
5224 break;
5225 }
5226 }
1da177e4
LT
5227}
5228
c347b765 5229static void hci_cmd_work(struct work_struct *work)
1da177e4 5230{
c347b765 5231 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5232 struct sk_buff *skb;
5233
2104786b
AE
5234 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5235 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5236
1da177e4 5237 /* Send queued commands */
5a08ecce
AE
5238 if (atomic_read(&hdev->cmd_cnt)) {
5239 skb = skb_dequeue(&hdev->cmd_q);
5240 if (!skb)
5241 return;
5242
7585b97a 5243 kfree_skb(hdev->sent_cmd);
1da177e4 5244
a675d7f1 5245 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5246 if (hdev->sent_cmd) {
1da177e4 5247 atomic_dec(&hdev->cmd_cnt);
57d17d70 5248 hci_send_frame(hdev, skb);
7bdb8a5c 5249 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5250 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5251 else
65cc2b49
MH
5252 schedule_delayed_work(&hdev->cmd_timer,
5253 HCI_CMD_TIMEOUT);
1da177e4
LT
5254 } else {
5255 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5256 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5257 }
5258 }
5259}
b1efcc28
AG
5260
5261void hci_req_add_le_scan_disable(struct hci_request *req)
5262{
5263 struct hci_cp_le_set_scan_enable cp;
5264
5265 memset(&cp, 0, sizeof(cp));
5266 cp.enable = LE_SCAN_DISABLE;
5267 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5268}
a4790dbd 5269
8ef30fd3
AG
5270void hci_req_add_le_passive_scan(struct hci_request *req)
5271{
5272 struct hci_cp_le_set_scan_param param_cp;
5273 struct hci_cp_le_set_scan_enable enable_cp;
5274 struct hci_dev *hdev = req->hdev;
5275 u8 own_addr_type;
5276
6ab535a7
MH
 5277 /* Set require_privacy to false since no SCAN_REQ are sent
5278 * during passive scanning. Not using an unresolvable address
5279 * here is important so that peer devices using direct
5280 * advertising with our address will be correctly reported
5281 * by the controller.
8ef30fd3 5282 */
6ab535a7 5283 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5284 return;
5285
5286 memset(&param_cp, 0, sizeof(param_cp));
5287 param_cp.type = LE_SCAN_PASSIVE;
5288 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5289 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5290 param_cp.own_address_type = own_addr_type;
5291 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5292 &param_cp);
5293
5294 memset(&enable_cp, 0, sizeof(enable_cp));
5295 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5296 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5297 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5298 &enable_cp);
5299}
5300
a4790dbd
AG
5301static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5302{
5303 if (status)
5304 BT_DBG("HCI request failed to update background scanning: "
5305 "status 0x%2.2x", status);
5306}
5307
5308/* This function controls the background scanning based on hdev->pend_le_conns
 5309 * list. If there are pending LE connections, we start the background scanning,
 5310 * otherwise we stop it.
 5311 *
 5312 * This function requires that the caller holds hdev->lock.
5313 */
5314void hci_update_background_scan(struct hci_dev *hdev)
5315{
a4790dbd
AG
5316 struct hci_request req;
5317 struct hci_conn *conn;
5318 int err;
5319
c20c02d5
MH
5320 if (!test_bit(HCI_UP, &hdev->flags) ||
5321 test_bit(HCI_INIT, &hdev->flags) ||
5322 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5323 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5324 return;
5325
a4790dbd
AG
5326 hci_req_init(&req, hdev);
5327
5328 if (list_empty(&hdev->pend_le_conns)) {
 5329 /* If there are no pending LE connections, we should stop
5330 * the background scanning.
5331 */
5332
 5333 /* If the controller is not scanning, we are done. */
5334 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5335 return;
5336
5337 hci_req_add_le_scan_disable(&req);
5338
5339 BT_DBG("%s stopping background scanning", hdev->name);
5340 } else {
a4790dbd
AG
5341 /* If there is at least one pending LE connection, we should
5342 * keep the background scan running.
5343 */
5344
a4790dbd
AG
 5345 /* If the controller is connecting, we should not start scanning
5346 * since some controllers are not able to scan and connect at
5347 * the same time.
5348 */
5349 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5350 if (conn)
5351 return;
5352
4340a124
AG
 5353 /* If the controller is currently scanning, we stop it to ensure we
 5354 * don't miss any advertising (due to the duplicates filter).
5355 */
5356 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5357 hci_req_add_le_scan_disable(&req);
5358
8ef30fd3 5359 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5360
5361 BT_DBG("%s starting background scanning", hdev->name);
5362 }
5363
5364 err = hci_req_run(&req, update_background_scan_complete);
5365 if (err)
5366 BT_ERR("Failed to run HCI request: err %d", err);
5367}
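/* Editor's note: as the comment above states, hci_update_background_scan()
 * must be called with hdev->lock held, so a typical call site looks like:
 *
 *      hci_dev_lock(hdev);
 *      hci_update_background_scan(hdev);
 *      hci_dev_unlock(hdev);
 */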