/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

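/* Usage sketch (illustrative; assumes debugfs is mounted at the usual
 * /sys/kernel/debug and a controller named hci0): with the device up,
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * sends HCI_OP_ENABLE_DUT_MODE, while writing N resets the controller
 * via HCI_OP_RESET to leave Device Under Test mode again.
 */
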
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

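/* Worked example of the reversal above (illustrative values): the A2DP
 * Sink service UUID 0000110b-0000-1000-8000-00805f9b34fb is stored in
 * uuid->uuid[] with its bytes reversed, i.e. starting fb 34 9b 5f ...
 * and ending ... 0b 11 00 00. Copying index 15 down to index 0 restores
 * the big-endian order that the %pUb format specifier expects.
 */
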
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

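/* The bounds above come from the Core specification: LE connection
 * interval values range from 0x0006 to 0x0c80 in units of 1.25 ms,
 * i.e. 6 * 1.25 ms = 7.5 ms up to 3200 * 1.25 ms = 4 s.
 */
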
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

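/* The advertising channel map is a 3-bit field: bit 0 enables channel
 * 37, bit 1 channel 38 and bit 2 channel 39, so the valid range checked
 * above is 0x01 to 0x07 (all three channels).
 */
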
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

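/* Usage sketch (illustrative; mirrors dut_mode_write() above, with the
 * request lock held around the call):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * The returned skb carries the Command Complete parameters and must be
 * freed by the caller.
 */
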
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

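/* The request builders below follow a common pattern: the callback
 * queues one or more commands on the struct hci_request, and
 * hci_req_sync() runs the queue and blocks until the final command
 * completes. A sketch (illustrative parameters):
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */
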
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

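/* For reference: Write Inquiry Mode takes 0x00 for standard inquiry
 * results, 0x01 for inquiry results with RSSI and 0x02 for inquiry
 * results with RSSI or extended inquiry results. The manufacturer and
 * revision checks above pick RSSI mode for specific controllers that
 * support it without advertising the corresponding LMP feature bit.
 */
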
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

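/* The event mask is an 8-byte bit field, so byte N bit B corresponds to
 * mask bit (8 * N + B). For example, events[4] |= 0x01 above sets mask
 * bit 32, which per the inline comment enables the Flow Specification
 * Complete event.
 */
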
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

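/* Note: the walk above keeps the resolve list ordered by ascending
 * abs(rssi) while leaving NAME_PENDING entries in place, so that name
 * resolution is attempted for the strongest devices first.
 */
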
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

1948static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1949{
30883512 1950 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1951 struct inquiry_info *info = (struct inquiry_info *) buf;
1952 struct inquiry_entry *e;
1953 int copied = 0;
1954
561aafbc 1955 list_for_each_entry(e, &cache->all, all) {
1da177e4 1956 struct inquiry_data *data = &e->data;
b57c1a56
JH
1957
1958 if (copied >= num)
1959 break;
1960
1da177e4
LT
1961 bacpy(&info->bdaddr, &data->bdaddr);
1962 info->pscan_rep_mode = data->pscan_rep_mode;
1963 info->pscan_period_mode = data->pscan_period_mode;
1964 info->pscan_mode = data->pscan_mode;
1965 memcpy(info->dev_class, data->dev_class, 3);
1966 info->clock_offset = data->clock_offset;
b57c1a56 1967
1da177e4 1968 info++;
b57c1a56 1969 copied++;
1da177e4
LT
1970 }
1971
1972 BT_DBG("cache %p, copied %d", cache, copied);
1973 return copied;
1974}
1975
42c6b129 1976static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1977{
1978 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1979 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1980 struct hci_cp_inquiry cp;
1981
1982 BT_DBG("%s", hdev->name);
1983
1984 if (test_bit(HCI_INQUIRY, &hdev->flags))
1985 return;
1986
1987 /* Start Inquiry */
1988 memcpy(&cp.lap, &ir->lap, 3);
1989 cp.length = ir->length;
1990 cp.num_rsp = ir->num_rsp;
42c6b129 1991 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1992}
1993
3e13fa1e
AG
1994static int wait_inquiry(void *word)
1995{
1996 schedule();
1997 return signal_pending(current);
1998}
1999
1da177e4
LT
2000int hci_inquiry(void __user *arg)
2001{
2002 __u8 __user *ptr = arg;
2003 struct hci_inquiry_req ir;
2004 struct hci_dev *hdev;
2005 int err = 0, do_inquiry = 0, max_rsp;
2006 long timeo;
2007 __u8 *buf;
2008
2009 if (copy_from_user(&ir, ptr, sizeof(ir)))
2010 return -EFAULT;
2011
5a08ecce
AE
2012 hdev = hci_dev_get(ir.dev_id);
2013 if (!hdev)
1da177e4
LT
2014 return -ENODEV;
2015
0736cfa8
MH
2016 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2017 err = -EBUSY;
2018 goto done;
2019 }
2020
5b69bef5
MH
2021 if (hdev->dev_type != HCI_BREDR) {
2022 err = -EOPNOTSUPP;
2023 goto done;
2024 }
2025
56f87901
JH
2026 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2027 err = -EOPNOTSUPP;
2028 goto done;
2029 }
2030
09fd0de5 2031 hci_dev_lock(hdev);
8e87d142 2032 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2033 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2034 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2035 do_inquiry = 1;
2036 }
09fd0de5 2037 hci_dev_unlock(hdev);
1da177e4 2038
04837f64 2039 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2040
2041 if (do_inquiry) {
01178cd4
JH
2042 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2043 timeo);
70f23020
AE
2044 if (err < 0)
2045 goto done;
3e13fa1e
AG
2046
2047 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2048 * cleared). If it is interrupted by a signal, return -EINTR.
2049 */
2050 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2051 TASK_INTERRUPTIBLE))
2052 return -EINTR;
70f23020 2053 }
1da177e4 2054
8fc9ced3
GP
2055 /* For an unlimited number of responses, use a buffer with
2056 * 255 entries.
2057 */
1da177e4
LT
2058 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2059
2060 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2061 * and then copy it to user space.
2062 */
01df8c31 2063 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2064 if (!buf) {
1da177e4
LT
2065 err = -ENOMEM;
2066 goto done;
2067 }
2068
09fd0de5 2069 hci_dev_lock(hdev);
1da177e4 2070 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2071 hci_dev_unlock(hdev);
1da177e4
LT
2072
2073 BT_DBG("num_rsp %d", ir.num_rsp);
2074
2075 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2076 ptr += sizeof(ir);
2077 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2078 ir.num_rsp))
1da177e4 2079 err = -EFAULT;
8e87d142 2080 } else
1da177e4
LT
2081 err = -EFAULT;
2082
2083 kfree(buf);
2084
2085done:
2086 hci_dev_put(hdev);
2087 return err;
2088}
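
/* Hedged userspace sketch (assumed caller, not part of this file): the
 * handler above is reached through the HCIINQUIRY ioctl on an HCI socket,
 * with the request header and the result buffer packed together:
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) buf;
 *
 *	ir->dev_id  = 0;		(hci0)
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		(GIAC 0x9e8b33)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		(units of 1.28 seconds)
 *	ir->num_rsp = 0;		(unlimited, capped at 255)
 *
 *	ioctl(sock, HCIINQUIRY, buf);
 */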
2089
cbed0ca1 2090static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2091{
1da177e4
LT
2092 int ret = 0;
2093
1da177e4
LT
2094 BT_DBG("%s %p", hdev->name, hdev);
2095
2096 hci_req_lock(hdev);
2097
94324962
JH
2098 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2099 ret = -ENODEV;
2100 goto done;
2101 }
2102
a5c8f270
MH
2103 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2104 /* Check for rfkill but allow the HCI setup stage to
2105 * proceed (which in itself doesn't cause any RF activity).
2106 */
2107 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2108 ret = -ERFKILL;
2109 goto done;
2110 }
2111
2112 /* Check for valid public address or a configured static
2113 * random address, but let the HCI setup proceed to
2114 * be able to determine if there is a public address
2115 * or not.
2116 *
c6beca0e
MH
2117 * In case of user channel usage, it is not important
2118 * if a public address or static random address is
2119 * available.
2120 *
a5c8f270
MH
2121 * This check is only valid for BR/EDR controllers
2122 * since AMP controllers do not have an address.
2123 */
c6beca0e
MH
2124 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2125 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2126 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2127 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2128 ret = -EADDRNOTAVAIL;
2129 goto done;
2130 }
611b30f7
MH
2131 }
2132
1da177e4
LT
2133 if (test_bit(HCI_UP, &hdev->flags)) {
2134 ret = -EALREADY;
2135 goto done;
2136 }
2137
1da177e4
LT
2138 if (hdev->open(hdev)) {
2139 ret = -EIO;
2140 goto done;
2141 }
2142
f41c70c4
MH
2143 atomic_set(&hdev->cmd_cnt, 1);
2144 set_bit(HCI_INIT, &hdev->flags);
2145
2146 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2147 ret = hdev->setup(hdev);
2148
2149 if (!ret) {
f41c70c4
MH
2150 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2151 set_bit(HCI_RAW, &hdev->flags);
2152
0736cfa8
MH
2153 if (!test_bit(HCI_RAW, &hdev->flags) &&
2154 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2155 ret = __hci_init(hdev);
1da177e4
LT
2156 }
2157
f41c70c4
MH
2158 clear_bit(HCI_INIT, &hdev->flags);
2159
1da177e4
LT
2160 if (!ret) {
2161 hci_dev_hold(hdev);
d6bfd59c 2162 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2163 set_bit(HCI_UP, &hdev->flags);
2164 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2165 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 2166 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2167 hdev->dev_type == HCI_BREDR) {
09fd0de5 2168 hci_dev_lock(hdev);
744cf19e 2169 mgmt_powered(hdev, 1);
09fd0de5 2170 hci_dev_unlock(hdev);
56e5cb86 2171 }
8e87d142 2172 } else {
1da177e4 2173 /* Init failed, cleanup */
3eff45ea 2174 flush_work(&hdev->tx_work);
c347b765 2175 flush_work(&hdev->cmd_work);
b78752cc 2176 flush_work(&hdev->rx_work);
1da177e4
LT
2177
2178 skb_queue_purge(&hdev->cmd_q);
2179 skb_queue_purge(&hdev->rx_q);
2180
2181 if (hdev->flush)
2182 hdev->flush(hdev);
2183
2184 if (hdev->sent_cmd) {
2185 kfree_skb(hdev->sent_cmd);
2186 hdev->sent_cmd = NULL;
2187 }
2188
2189 hdev->close(hdev);
2190 hdev->flags = 0;
2191 }
2192
2193done:
2194 hci_req_unlock(hdev);
1da177e4
LT
2195 return ret;
2196}
2197
cbed0ca1
JH
2198/* ---- HCI ioctl helpers ---- */
2199
2200int hci_dev_open(__u16 dev)
2201{
2202 struct hci_dev *hdev;
2203 int err;
2204
2205 hdev = hci_dev_get(dev);
2206 if (!hdev)
2207 return -ENODEV;
2208
e1d08f40
JH
2209 /* We need to ensure that no other power on/off work is pending
2210 * before proceeding to call hci_dev_do_open. This is
2211 * particularly important if the setup procedure has not yet
2212 * completed.
2213 */
2214 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2215 cancel_delayed_work(&hdev->power_off);
2216
a5c8f270
MH
2217 /* After this call it is guaranteed that the setup procedure
2218 * has finished. This means that error conditions like RFKILL
2219 * or no valid public or static random address apply.
2220 */
e1d08f40
JH
2221 flush_workqueue(hdev->req_workqueue);
2222
cbed0ca1
JH
2223 err = hci_dev_do_open(hdev);
2224
2225 hci_dev_put(hdev);
2226
2227 return err;
2228}
2229
1da177e4
LT
2230static int hci_dev_do_close(struct hci_dev *hdev)
2231{
2232 BT_DBG("%s %p", hdev->name, hdev);
2233
78c04c0b
VCG
2234 cancel_delayed_work(&hdev->power_off);
2235
1da177e4
LT
2236 hci_req_cancel(hdev, ENODEV);
2237 hci_req_lock(hdev);
2238
2239 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 2240 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2241 hci_req_unlock(hdev);
2242 return 0;
2243 }
2244
3eff45ea
GP
2245 /* Flush the RX and TX work items */
2246 flush_work(&hdev->tx_work);
b78752cc 2247 flush_work(&hdev->rx_work);
1da177e4 2248
16ab91ab 2249 if (hdev->discov_timeout > 0) {
e0f9309f 2250 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2251 hdev->discov_timeout = 0;
5e5282bb 2252 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2253 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2254 }
2255
a8b2d5c2 2256 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2257 cancel_delayed_work(&hdev->service_cache);
2258
7ba8b4be 2259 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2260
2261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2262 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2263
09fd0de5 2264 hci_dev_lock(hdev);
1f9b9a5d 2265 hci_inquiry_cache_flush(hdev);
1da177e4 2266 hci_conn_hash_flush(hdev);
09fd0de5 2267 hci_dev_unlock(hdev);
1da177e4
LT
2268
2269 hci_notify(hdev, HCI_DEV_DOWN);
2270
2271 if (hdev->flush)
2272 hdev->flush(hdev);
2273
2274 /* Reset device */
2275 skb_queue_purge(&hdev->cmd_q);
2276 atomic_set(&hdev->cmd_cnt, 1);
8af59467 2277 if (!test_bit(HCI_RAW, &hdev->flags) &&
3a6afbd2 2278 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2279 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2280 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2281 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2282 clear_bit(HCI_INIT, &hdev->flags);
2283 }
2284
c347b765
GP
2285 /* flush cmd work */
2286 flush_work(&hdev->cmd_work);
1da177e4
LT
2287
2288 /* Drop queues */
2289 skb_queue_purge(&hdev->rx_q);
2290 skb_queue_purge(&hdev->cmd_q);
2291 skb_queue_purge(&hdev->raw_q);
2292
2293 /* Drop last sent command */
2294 if (hdev->sent_cmd) {
b79f44c1 2295 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2296 kfree_skb(hdev->sent_cmd);
2297 hdev->sent_cmd = NULL;
2298 }
2299
b6ddb638
JH
2300 kfree_skb(hdev->recv_evt);
2301 hdev->recv_evt = NULL;
2302
1da177e4
LT
2303 /* After this point our queues are empty
2304 * and no tasks are scheduled. */
2305 hdev->close(hdev);
2306
35b973c9
JH
2307 /* Clear flags */
2308 hdev->flags = 0;
2309 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2310
93c311a0
MH
2311 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2312 if (hdev->dev_type == HCI_BREDR) {
2313 hci_dev_lock(hdev);
2314 mgmt_powered(hdev, 0);
2315 hci_dev_unlock(hdev);
2316 }
8ee56540 2317 }
5add6af8 2318
ced5c338 2319 /* Controller radio is available but is currently powered down */
536619e8 2320 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2321
e59fda8d 2322 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2323 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2324 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2325
1da177e4
LT
2326 hci_req_unlock(hdev);
2327
2328 hci_dev_put(hdev);
2329 return 0;
2330}
2331
2332int hci_dev_close(__u16 dev)
2333{
2334 struct hci_dev *hdev;
2335 int err;
2336
70f23020
AE
2337 hdev = hci_dev_get(dev);
2338 if (!hdev)
1da177e4 2339 return -ENODEV;
8ee56540 2340
0736cfa8
MH
2341 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2342 err = -EBUSY;
2343 goto done;
2344 }
2345
8ee56540
MH
2346 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2347 cancel_delayed_work(&hdev->power_off);
2348
1da177e4 2349 err = hci_dev_do_close(hdev);
8ee56540 2350
0736cfa8 2351done:
1da177e4
LT
2352 hci_dev_put(hdev);
2353 return err;
2354}
2355
2356int hci_dev_reset(__u16 dev)
2357{
2358 struct hci_dev *hdev;
2359 int ret = 0;
2360
70f23020
AE
2361 hdev = hci_dev_get(dev);
2362 if (!hdev)
1da177e4
LT
2363 return -ENODEV;
2364
2365 hci_req_lock(hdev);
1da177e4 2366
808a049e
MH
2367 if (!test_bit(HCI_UP, &hdev->flags)) {
2368 ret = -ENETDOWN;
1da177e4 2369 goto done;
808a049e 2370 }
1da177e4 2371
0736cfa8
MH
2372 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2373 ret = -EBUSY;
2374 goto done;
2375 }
2376
1da177e4
LT
2377 /* Drop queues */
2378 skb_queue_purge(&hdev->rx_q);
2379 skb_queue_purge(&hdev->cmd_q);
2380
09fd0de5 2381 hci_dev_lock(hdev);
1f9b9a5d 2382 hci_inquiry_cache_flush(hdev);
1da177e4 2383 hci_conn_hash_flush(hdev);
09fd0de5 2384 hci_dev_unlock(hdev);
1da177e4
LT
2385
2386 if (hdev->flush)
2387 hdev->flush(hdev);
2388
8e87d142 2389 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2390 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
2391
2392 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 2393 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2394
2395done:
1da177e4
LT
2396 hci_req_unlock(hdev);
2397 hci_dev_put(hdev);
2398 return ret;
2399}
2400
2401int hci_dev_reset_stat(__u16 dev)
2402{
2403 struct hci_dev *hdev;
2404 int ret = 0;
2405
70f23020
AE
2406 hdev = hci_dev_get(dev);
2407 if (!hdev)
1da177e4
LT
2408 return -ENODEV;
2409
0736cfa8
MH
2410 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2411 ret = -EBUSY;
2412 goto done;
2413 }
2414
1da177e4
LT
2415 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2416
0736cfa8 2417done:
1da177e4 2418 hci_dev_put(hdev);
1da177e4
LT
2419 return ret;
2420}
2421
2422int hci_dev_cmd(unsigned int cmd, void __user *arg)
2423{
2424 struct hci_dev *hdev;
2425 struct hci_dev_req dr;
2426 int err = 0;
2427
2428 if (copy_from_user(&dr, arg, sizeof(dr)))
2429 return -EFAULT;
2430
70f23020
AE
2431 hdev = hci_dev_get(dr.dev_id);
2432 if (!hdev)
1da177e4
LT
2433 return -ENODEV;
2434
0736cfa8
MH
2435 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2436 err = -EBUSY;
2437 goto done;
2438 }
2439
5b69bef5
MH
2440 if (hdev->dev_type != HCI_BREDR) {
2441 err = -EOPNOTSUPP;
2442 goto done;
2443 }
2444
56f87901
JH
2445 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2446 err = -EOPNOTSUPP;
2447 goto done;
2448 }
2449
1da177e4
LT
2450 switch (cmd) {
2451 case HCISETAUTH:
01178cd4
JH
2452 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2453 HCI_INIT_TIMEOUT);
1da177e4
LT
2454 break;
2455
2456 case HCISETENCRYPT:
2457 if (!lmp_encrypt_capable(hdev)) {
2458 err = -EOPNOTSUPP;
2459 break;
2460 }
2461
2462 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2463 /* Auth must be enabled first */
01178cd4
JH
2464 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2465 HCI_INIT_TIMEOUT);
1da177e4
LT
2466 if (err)
2467 break;
2468 }
2469
01178cd4
JH
2470 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2471 HCI_INIT_TIMEOUT);
1da177e4
LT
2472 break;
2473
2474 case HCISETSCAN:
01178cd4
JH
2475 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2476 HCI_INIT_TIMEOUT);
1da177e4
LT
2477 break;
2478
1da177e4 2479 case HCISETLINKPOL:
01178cd4
JH
2480 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2481 HCI_INIT_TIMEOUT);
1da177e4
LT
2482 break;
2483
2484 case HCISETLINKMODE:
e4e8e37c
MH
2485 hdev->link_mode = ((__u16) dr.dev_opt) &
2486 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2487 break;
2488
2489 case HCISETPTYPE:
2490 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2491 break;
2492
2493 case HCISETACLMTU:
e4e8e37c
MH
2494 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2495 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2496 break;
2497
2498 case HCISETSCOMTU:
e4e8e37c
MH
2499 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2500 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2501 break;
2502
2503 default:
2504 err = -EINVAL;
2505 break;
2506 }
e4e8e37c 2507
0736cfa8 2508done:
1da177e4
LT
2509 hci_dev_put(hdev);
2510 return err;
2511}
2512
2513int hci_get_dev_list(void __user *arg)
2514{
8035ded4 2515 struct hci_dev *hdev;
1da177e4
LT
2516 struct hci_dev_list_req *dl;
2517 struct hci_dev_req *dr;
1da177e4
LT
2518 int n = 0, size, err;
2519 __u16 dev_num;
2520
2521 if (get_user(dev_num, (__u16 __user *) arg))
2522 return -EFAULT;
2523
2524 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2525 return -EINVAL;
2526
2527 size = sizeof(*dl) + dev_num * sizeof(*dr);
2528
70f23020
AE
2529 dl = kzalloc(size, GFP_KERNEL);
2530 if (!dl)
1da177e4
LT
2531 return -ENOMEM;
2532
2533 dr = dl->dev_req;
2534
f20d09d5 2535 read_lock(&hci_dev_list_lock);
8035ded4 2536 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2537 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2538 cancel_delayed_work(&hdev->power_off);
c542a06c 2539
a8b2d5c2
JH
2540 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2541 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2542
1da177e4
LT
2543 (dr + n)->dev_id = hdev->id;
2544 (dr + n)->dev_opt = hdev->flags;
c542a06c 2545
1da177e4
LT
2546 if (++n >= dev_num)
2547 break;
2548 }
f20d09d5 2549 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2550
2551 dl->dev_num = n;
2552 size = sizeof(*dl) + n * sizeof(*dr);
2553
2554 err = copy_to_user(arg, dl, size);
2555 kfree(dl);
2556
2557 return err ? -EFAULT : 0;
2558}
2559
2560int hci_get_dev_info(void __user *arg)
2561{
2562 struct hci_dev *hdev;
2563 struct hci_dev_info di;
2564 int err = 0;
2565
2566 if (copy_from_user(&di, arg, sizeof(di)))
2567 return -EFAULT;
2568
70f23020
AE
2569 hdev = hci_dev_get(di.dev_id);
2570 if (!hdev)
1da177e4
LT
2571 return -ENODEV;
2572
a8b2d5c2 2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2574 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2575
a8b2d5c2
JH
2576 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2577 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2578
1da177e4
LT
2579 strcpy(di.name, hdev->name);
2580 di.bdaddr = hdev->bdaddr;
60f2a3ed 2581 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2582 di.flags = hdev->flags;
2583 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2584 if (lmp_bredr_capable(hdev)) {
2585 di.acl_mtu = hdev->acl_mtu;
2586 di.acl_pkts = hdev->acl_pkts;
2587 di.sco_mtu = hdev->sco_mtu;
2588 di.sco_pkts = hdev->sco_pkts;
2589 } else {
2590 di.acl_mtu = hdev->le_mtu;
2591 di.acl_pkts = hdev->le_pkts;
2592 di.sco_mtu = 0;
2593 di.sco_pkts = 0;
2594 }
1da177e4
LT
2595 di.link_policy = hdev->link_policy;
2596 di.link_mode = hdev->link_mode;
2597
2598 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2599 memcpy(&di.features, &hdev->features, sizeof(di.features));
2600
2601 if (copy_to_user(arg, &di, sizeof(di)))
2602 err = -EFAULT;
2603
2604 hci_dev_put(hdev);
2605
2606 return err;
2607}
2608
2609/* ---- Interface to HCI drivers ---- */
2610
611b30f7
MH
2611static int hci_rfkill_set_block(void *data, bool blocked)
2612{
2613 struct hci_dev *hdev = data;
2614
2615 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2616
0736cfa8
MH
2617 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2618 return -EBUSY;
2619
5e130367
JH
2620 if (blocked) {
2621 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2622 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2623 hci_dev_do_close(hdev);
5e130367
JH
2624 } else {
2625 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2626 }
611b30f7
MH
2627
2628 return 0;
2629}
2630
2631static const struct rfkill_ops hci_rfkill_ops = {
2632 .set_block = hci_rfkill_set_block,
2633};
2634
ab81cbf9
JH
2635static void hci_power_on(struct work_struct *work)
2636{
2637 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2638 int err;
ab81cbf9
JH
2639
2640 BT_DBG("%s", hdev->name);
2641
cbed0ca1 2642 err = hci_dev_do_open(hdev);
96570ffc
JH
2643 if (err < 0) {
2644 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2645 return;
96570ffc 2646 }
ab81cbf9 2647
a5c8f270
MH
2648 /* During the HCI setup phase, a few error conditions are
2649 * ignored and they need to be checked now. If they are still
2650 * valid, it is important to turn the device back off.
2651 */
2652 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2653 (hdev->dev_type == HCI_BREDR &&
2654 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2655 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2656 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2657 hci_dev_do_close(hdev);
2658 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2659 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2660 HCI_AUTO_OFF_TIMEOUT);
bf543036 2661 }
ab81cbf9 2662
a8b2d5c2 2663 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 2664 mgmt_index_added(hdev);
ab81cbf9
JH
2665}
2666
2667static void hci_power_off(struct work_struct *work)
2668{
3243553f 2669 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2670 power_off.work);
ab81cbf9
JH
2671
2672 BT_DBG("%s", hdev->name);
2673
8ee56540 2674 hci_dev_do_close(hdev);
ab81cbf9
JH
2675}
2676
16ab91ab
JH
2677static void hci_discov_off(struct work_struct *work)
2678{
2679 struct hci_dev *hdev;
16ab91ab
JH
2680
2681 hdev = container_of(work, struct hci_dev, discov_off.work);
2682
2683 BT_DBG("%s", hdev->name);
2684
d1967ff8 2685 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2686}
2687
35f7498a 2688void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2689{
4821002c 2690 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2691
4821002c
JH
2692 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2693 list_del(&uuid->list);
2aeb9a1a
JH
2694 kfree(uuid);
2695 }
2aeb9a1a
JH
2696}
2697
35f7498a 2698void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2699{
2700 struct list_head *p, *n;
2701
2702 list_for_each_safe(p, n, &hdev->link_keys) {
2703 struct link_key *key;
2704
2705 key = list_entry(p, struct link_key, list);
2706
2707 list_del(p);
2708 kfree(key);
2709 }
55ed8ca1
JH
2710}
2711
35f7498a 2712void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2713{
2714 struct smp_ltk *k, *tmp;
2715
2716 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2717 list_del(&k->list);
2718 kfree(k);
2719 }
b899efaf
VCG
2720}
2721
970c4e46
JH
2722void hci_smp_irks_clear(struct hci_dev *hdev)
2723{
2724 struct smp_irk *k, *tmp;
2725
2726 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2727 list_del(&k->list);
2728 kfree(k);
2729 }
2730}
2731
55ed8ca1
JH
2732struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2733{
8035ded4 2734 struct link_key *k;
55ed8ca1 2735
8035ded4 2736 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2737 if (bacmp(bdaddr, &k->bdaddr) == 0)
2738 return k;
55ed8ca1
JH
2739
2740 return NULL;
2741}
2742
745c0ce3 2743static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2744 u8 key_type, u8 old_key_type)
d25e28ab
JH
2745{
2746 /* Legacy key */
2747 if (key_type < 0x03)
745c0ce3 2748 return true;
d25e28ab
JH
2749
2750 /* Debug keys are insecure so don't store them persistently */
2751 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2752 return false;
d25e28ab
JH
2753
2754 /* Changed combination key and there's no previous one */
2755 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2756 return false;
d25e28ab
JH
2757
2758 /* Security mode 3 case */
2759 if (!conn)
745c0ce3 2760 return true;
d25e28ab
JH
2761
2762 /* Neither the local nor the remote side requested no-bonding */
2763 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2764 return true;
d25e28ab
JH
2765
2766 /* Local side had dedicated bonding as requirement */
2767 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2768 return true;
d25e28ab
JH
2769
2770 /* Remote side had dedicated bonding as requirement */
2771 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2772 return true;
d25e28ab
JH
2773
2774 /* If none of the above criteria match, then don't store the key
2775 * persistently */
745c0ce3 2776 return false;
d25e28ab
JH
2777}
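
/* Illustrative summary of the decision order above (an addition, not part
 * of the original source; auth_type/remote_auth follow the usual 0x00-0x05
 * bonding/MITM encoding):
 *
 *	legacy key (type < 0x03)			-> store
 *	debug combination key				-> don't store
 *	changed combination key, old_key_type == 0xff	-> don't store
 *	no connection (security mode 3)			-> store
 *	both sides requested bonding (auth > 0x01)	-> store
 *	either side used dedicated bonding (0x02/0x03)	-> store
 *	anything else					-> don't store
 */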
2778
98a0b845
JH
2779static bool ltk_type_master(u8 type)
2780{
2781 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2782 return true;
2783
2784 return false;
2785}
2786
2787struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2788 bool master)
75d262c2 2789{
c9839a11 2790 struct smp_ltk *k;
75d262c2 2791
c9839a11
VCG
2792 list_for_each_entry(k, &hdev->long_term_keys, list) {
2793 if (k->ediv != ediv ||
a8c5fb1a 2794 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2795 continue;
2796
98a0b845
JH
2797 if (ltk_type_master(k->type) != master)
2798 continue;
2799
c9839a11 2800 return k;
75d262c2
VCG
2801 }
2802
2803 return NULL;
2804}
75d262c2 2805
c9839a11 2806struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2807 u8 addr_type, bool master)
75d262c2 2808{
c9839a11 2809 struct smp_ltk *k;
75d262c2 2810
c9839a11
VCG
2811 list_for_each_entry(k, &hdev->long_term_keys, list)
2812 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2813 bacmp(bdaddr, &k->bdaddr) == 0 &&
2814 ltk_type_master(k->type) == master)
75d262c2
VCG
2815 return k;
2816
2817 return NULL;
2818}
75d262c2 2819
970c4e46
JH
2820struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2821{
2822 struct smp_irk *irk;
2823
2824 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2825 if (!bacmp(&irk->rpa, rpa))
2826 return irk;
2827 }
2828
2829 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2830 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2831 bacpy(&irk->rpa, rpa);
2832 return irk;
2833 }
2834 }
2835
2836 return NULL;
2837}
2838
2839struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2840 u8 addr_type)
2841{
2842 struct smp_irk *irk;
2843
6cfc9988
JH
2844 /* Identity Address must be public or static random */
2845 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2846 return NULL;
2847
970c4e46
JH
2848 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2849 if (addr_type == irk->addr_type &&
2850 bacmp(bdaddr, &irk->bdaddr) == 0)
2851 return irk;
2852 }
2853
2854 return NULL;
2855}
2856
d25e28ab 2857int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2858 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2859{
2860 struct link_key *key, *old_key;
745c0ce3
VA
2861 u8 old_key_type;
2862 bool persistent;
55ed8ca1
JH
2863
2864 old_key = hci_find_link_key(hdev, bdaddr);
2865 if (old_key) {
2866 old_key_type = old_key->type;
2867 key = old_key;
2868 } else {
12adcf3a 2869 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2870 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1
JH
2871 if (!key)
2872 return -ENOMEM;
2873 list_add(&key->list, &hdev->link_keys);
2874 }
2875
6ed93dc6 2876 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2877
d25e28ab
JH
2878 /* Some buggy controller combinations generate a changed
2879 * combination key for legacy pairing even when there's no
2880 * previous key */
2881 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2882 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2883 type = HCI_LK_COMBINATION;
655fe6ec
JH
2884 if (conn)
2885 conn->key_type = type;
2886 }
d25e28ab 2887
55ed8ca1 2888 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2889 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2890 key->pin_len = pin_len;
2891
b6020ba0 2892 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2893 key->type = old_key_type;
4748fed2
JH
2894 else
2895 key->type = type;
2896
4df378a1
JH
2897 if (!new_key)
2898 return 0;
2899
2900 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2901
744cf19e 2902 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2903
6ec5bcad
VA
2904 if (conn)
2905 conn->flush_key = !persistent;
55ed8ca1
JH
2906
2907 return 0;
2908}
2909
ca9142b8 2910struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271
JH
2911 u8 addr_type, u8 type, u8 authenticated,
2912 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
75d262c2 2913{
c9839a11 2914 struct smp_ltk *key, *old_key;
98a0b845 2915 bool master = ltk_type_master(type);
75d262c2 2916
98a0b845 2917 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2918 if (old_key)
75d262c2 2919 key = old_key;
c9839a11 2920 else {
0a14ab41 2921 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2922 if (!key)
ca9142b8 2923 return NULL;
c9839a11 2924 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2925 }
2926
75d262c2 2927 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2928 key->bdaddr_type = addr_type;
2929 memcpy(key->val, tk, sizeof(key->val));
2930 key->authenticated = authenticated;
2931 key->ediv = ediv;
2932 key->enc_size = enc_size;
2933 key->type = type;
2934 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2935
ca9142b8 2936 return key;
75d262c2
VCG
2937}
2938
ca9142b8
JH
2939struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2940 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2941{
2942 struct smp_irk *irk;
2943
2944 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2945 if (!irk) {
2946 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2947 if (!irk)
ca9142b8 2948 return NULL;
970c4e46
JH
2949
2950 bacpy(&irk->bdaddr, bdaddr);
2951 irk->addr_type = addr_type;
2952
2953 list_add(&irk->list, &hdev->identity_resolving_keys);
2954 }
2955
2956 memcpy(irk->val, val, 16);
2957 bacpy(&irk->rpa, rpa);
2958
ca9142b8 2959 return irk;
970c4e46
JH
2960}
2961
55ed8ca1
JH
2962int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2963{
2964 struct link_key *key;
2965
2966 key = hci_find_link_key(hdev, bdaddr);
2967 if (!key)
2968 return -ENOENT;
2969
6ed93dc6 2970 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2971
2972 list_del(&key->list);
2973 kfree(key);
2974
2975 return 0;
2976}
2977
e0b2b27e 2978int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
2979{
2980 struct smp_ltk *k, *tmp;
c51ffa0b 2981 int removed = 0;
b899efaf
VCG
2982
2983 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 2984 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2985 continue;
2986
6ed93dc6 2987 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2988
2989 list_del(&k->list);
2990 kfree(k);
c51ffa0b 2991 removed++;
b899efaf
VCG
2992 }
2993
c51ffa0b 2994 return removed ? 0 : -ENOENT;
b899efaf
VCG
2995}
2996
a7ec7338
JH
2997void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2998{
2999 struct smp_irk *k, *tmp;
3000
668b7b19 3001 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3002 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3003 continue;
3004
3005 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3006
3007 list_del(&k->list);
3008 kfree(k);
3009 }
3010}
3011
6bd32326 3012/* HCI command timer function */
bda4f23a 3013static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
3014{
3015 struct hci_dev *hdev = (void *) arg;
3016
bda4f23a
AE
3017 if (hdev->sent_cmd) {
3018 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3019 u16 opcode = __le16_to_cpu(sent->opcode);
3020
3021 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3022 } else {
3023 BT_ERR("%s command tx timeout", hdev->name);
3024 }
3025
6bd32326 3026 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3027 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3028}
3029
2763eda6 3030struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3031 bdaddr_t *bdaddr)
2763eda6
SJ
3032{
3033 struct oob_data *data;
3034
3035 list_for_each_entry(data, &hdev->remote_oob_data, list)
3036 if (bacmp(bdaddr, &data->bdaddr) == 0)
3037 return data;
3038
3039 return NULL;
3040}
3041
3042int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3043{
3044 struct oob_data *data;
3045
3046 data = hci_find_remote_oob_data(hdev, bdaddr);
3047 if (!data)
3048 return -ENOENT;
3049
6ed93dc6 3050 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3051
3052 list_del(&data->list);
3053 kfree(data);
3054
3055 return 0;
3056}
3057
35f7498a 3058void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3059{
3060 struct oob_data *data, *n;
3061
3062 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3063 list_del(&data->list);
3064 kfree(data);
3065 }
2763eda6
SJ
3066}
3067
0798872e
MH
3068int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 u8 *hash, u8 *randomizer)
2763eda6
SJ
3070{
3071 struct oob_data *data;
3072
3073 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3074 if (!data) {
0a14ab41 3075 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3076 if (!data)
3077 return -ENOMEM;
3078
3079 bacpy(&data->bdaddr, bdaddr);
3080 list_add(&data->list, &hdev->remote_oob_data);
3081 }
3082
519ca9d0
MH
3083 memcpy(data->hash192, hash, sizeof(data->hash192));
3084 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3085
0798872e
MH
3086 memset(data->hash256, 0, sizeof(data->hash256));
3087 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3088
3089 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3090
3091 return 0;
3092}
3093
3094int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3095 u8 *hash192, u8 *randomizer192,
3096 u8 *hash256, u8 *randomizer256)
3097{
3098 struct oob_data *data;
3099
3100 data = hci_find_remote_oob_data(hdev, bdaddr);
3101 if (!data) {
0a14ab41 3102 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3103 if (!data)
3104 return -ENOMEM;
3105
3106 bacpy(&data->bdaddr, bdaddr);
3107 list_add(&data->list, &hdev->remote_oob_data);
3108 }
3109
3110 memcpy(data->hash192, hash192, sizeof(data->hash192));
3111 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3112
3113 memcpy(data->hash256, hash256, sizeof(data->hash256));
3114 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3115
6ed93dc6 3116 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3117
3118 return 0;
3119}
3120
b9ee0a78
MH
3121struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3122 bdaddr_t *bdaddr, u8 type)
b2a66aad 3123{
8035ded4 3124 struct bdaddr_list *b;
b2a66aad 3125
b9ee0a78
MH
3126 list_for_each_entry(b, &hdev->blacklist, list) {
3127 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3128 return b;
b9ee0a78 3129 }
b2a66aad
AJ
3130
3131 return NULL;
3132}
3133
35f7498a 3134void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3135{
3136 struct list_head *p, *n;
3137
3138 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3139 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3140
3141 list_del(p);
3142 kfree(b);
3143 }
b2a66aad
AJ
3144}
3145
88c1fe4b 3146int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3147{
3148 struct bdaddr_list *entry;
b2a66aad 3149
b9ee0a78 3150 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3151 return -EBADF;
3152
b9ee0a78 3153 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3154 return -EEXIST;
b2a66aad
AJ
3155
3156 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3157 if (!entry)
3158 return -ENOMEM;
b2a66aad
AJ
3159
3160 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3161 entry->bdaddr_type = type;
b2a66aad
AJ
3162
3163 list_add(&entry->list, &hdev->blacklist);
3164
88c1fe4b 3165 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3166}
3167
88c1fe4b 3168int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3169{
3170 struct bdaddr_list *entry;
b2a66aad 3171
35f7498a
JH
3172 if (!bacmp(bdaddr, BDADDR_ANY)) {
3173 hci_blacklist_clear(hdev);
3174 return 0;
3175 }
b2a66aad 3176
b9ee0a78 3177 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3178 if (!entry)
5e762444 3179 return -ENOENT;
b2a66aad
AJ
3180
3181 list_del(&entry->list);
3182 kfree(entry);
3183
88c1fe4b 3184 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3185}
3186
15819a70
AG
3187/* This function requires the caller holds hdev->lock */
3188struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3189 bdaddr_t *addr, u8 addr_type)
3190{
3191 struct hci_conn_params *params;
3192
3193 list_for_each_entry(params, &hdev->le_conn_params, list) {
3194 if (bacmp(&params->addr, addr) == 0 &&
3195 params->addr_type == addr_type) {
3196 return params;
3197 }
3198 }
3199
3200 return NULL;
3201}
3202
3203/* This function requires the caller holds hdev->lock */
3204void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3205 u16 conn_min_interval, u16 conn_max_interval)
3206{
3207 struct hci_conn_params *params;
3208
3209 params = hci_conn_params_lookup(hdev, addr, addr_type);
3210 if (params) {
3211 params->conn_min_interval = conn_min_interval;
3212 params->conn_max_interval = conn_max_interval;
3213 return;
3214 }
3215
3216 params = kzalloc(sizeof(*params), GFP_KERNEL);
3217 if (!params) {
3218 BT_ERR("Out of memory");
3219 return;
3220 }
3221
3222 bacpy(&params->addr, addr);
3223 params->addr_type = addr_type;
3224 params->conn_min_interval = conn_min_interval;
3225 params->conn_max_interval = conn_max_interval;
3226
3227 list_add(&params->list, &hdev->le_conn_params);
3228
3229 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3230 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3231 conn_max_interval);
3232}
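
/* Usage sketch with hypothetical values (0x0028/0x0038 match the defaults
 * set in hci_alloc_dev below): the helpers above expect the caller to hold
 * hdev->lock, so a caller looks roughly like this.
 */
static void __maybe_unused example_set_conn_params(struct hci_dev *hdev,
						   bdaddr_t *addr)
{
	hci_dev_lock(hdev);
	hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC, 0x0028, 0x0038);
	hci_dev_unlock(hdev);
}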
3233
3234/* This function requires the caller holds hdev->lock */
3235void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3236{
3237 struct hci_conn_params *params;
3238
3239 params = hci_conn_params_lookup(hdev, addr, addr_type);
3240 if (!params)
3241 return;
3242
3243 list_del(&params->list);
3244 kfree(params);
3245
3246 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3247}
3248
3249/* This function requires the caller holds hdev->lock */
3250void hci_conn_params_clear(struct hci_dev *hdev)
3251{
3252 struct hci_conn_params *params, *tmp;
3253
3254 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3255 list_del(&params->list);
3256 kfree(params);
3257 }
3258
3259 BT_DBG("All LE connection parameters were removed");
3260}
3261
4c87eaab 3262static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3263{
4c87eaab
AG
3264 if (status) {
3265 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3266
4c87eaab
AG
3267 hci_dev_lock(hdev);
3268 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3269 hci_dev_unlock(hdev);
3270 return;
3271 }
7ba8b4be
AG
3272}
3273
4c87eaab 3274static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3275{
4c87eaab
AG
3276 /* General inquiry access code (GIAC) */
3277 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3278 struct hci_request req;
3279 struct hci_cp_inquiry cp;
7ba8b4be
AG
3280 int err;
3281
4c87eaab
AG
3282 if (status) {
3283 BT_ERR("Failed to disable LE scanning: status %d", status);
3284 return;
3285 }
7ba8b4be 3286
4c87eaab
AG
3287 switch (hdev->discovery.type) {
3288 case DISCOV_TYPE_LE:
3289 hci_dev_lock(hdev);
3290 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3291 hci_dev_unlock(hdev);
3292 break;
7ba8b4be 3293
4c87eaab
AG
3294 case DISCOV_TYPE_INTERLEAVED:
3295 hci_req_init(&req, hdev);
7ba8b4be 3296
4c87eaab
AG
3297 memset(&cp, 0, sizeof(cp));
3298 memcpy(&cp.lap, lap, sizeof(cp.lap));
3299 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3300 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3301
4c87eaab 3302 hci_dev_lock(hdev);
7dbfac1d 3303
4c87eaab 3304 hci_inquiry_cache_flush(hdev);
7dbfac1d 3305
4c87eaab
AG
3306 err = hci_req_run(&req, inquiry_complete);
3307 if (err) {
3308 BT_ERR("Inquiry request failed: err %d", err);
3309 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3310 }
7dbfac1d 3311
4c87eaab
AG
3312 hci_dev_unlock(hdev);
3313 break;
7dbfac1d 3314 }
7dbfac1d
AG
3315}
3316
7ba8b4be
AG
3317static void le_scan_disable_work(struct work_struct *work)
3318{
3319 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3320 le_scan_disable.work);
4c87eaab
AG
3321 struct hci_request req;
3322 int err;
7ba8b4be
AG
3323
3324 BT_DBG("%s", hdev->name);
3325
4c87eaab 3326 hci_req_init(&req, hdev);
28b75a89 3327
b1efcc28 3328 hci_req_add_le_scan_disable(&req);
28b75a89 3329
4c87eaab
AG
3330 err = hci_req_run(&req, le_scan_disable_work_complete);
3331 if (err)
3332 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3333}
3334
94b1fc92
MH
3335int hci_update_random_address(struct hci_request *req, bool require_privacy,
3336 u8 *own_addr_type)
ebd3a747
JH
3337{
3338 struct hci_dev *hdev = req->hdev;
3339 int err;
3340
3341 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
3342 * the current RPA has expired or something other than the
3343 * current RPA is in use, then generate a new one.
ebd3a747
JH
3344 */
3345 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3346 int to;
3347
3348 *own_addr_type = ADDR_LE_DEV_RANDOM;
3349
3350 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3351 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3352 return 0;
3353
2b5224dc 3354 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3355 if (err < 0) {
3356 BT_ERR("%s failed to generate new RPA", hdev->name);
3357 return err;
3358 }
3359
2b5224dc 3360 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
ebd3a747
JH
3361
3362 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3363 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3364
3365 return 0;
94b1fc92
MH
3366 }
3367
3368 /* If privacy is required but no resolvable private address is
3369 * available, use a non-resolvable private address. This is useful
3370 * for active scanning and non-connectable advertising.
3371 */
3372 if (require_privacy) {
3373 bdaddr_t urpa;
3374
3375 get_random_bytes(&urpa, 6);
3376 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3377
3378 *own_addr_type = ADDR_LE_DEV_RANDOM;
3379 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3380 return 0;
ebd3a747
JH
3381 }
3382
3383 /* If forcing the static address is in use or there is no public
3384 * address, use the static address as the random address (but skip
3385 * the HCI command if the current random address is already the
3386 * static one).
3387 */
3388 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3389 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3390 *own_addr_type = ADDR_LE_DEV_RANDOM;
3391 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3392 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3393 &hdev->static_addr);
3394 return 0;
3395 }
3396
3397 /* Neither privacy nor static address is being used so use a
3398 * public address.
3399 */
3400 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3401
3402 return 0;
3403}
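
/* Assumed usage sketch, modelled on the LE request-building paths: the
 * helper above runs while a request is being assembled, and the resulting
 * own_addr_type feeds whatever LE command is queued next.
 */
static void __maybe_unused example_use_random_address(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 own_addr_type;

	hci_req_init(&req, hdev);

	if (hci_update_random_address(&req, false, &own_addr_type) < 0)
		return;

	BT_DBG("own address type %u", own_addr_type);

	/* queue e.g. HCI_OP_LE_SET_ADV_PARAM using own_addr_type here */

	if (hci_req_run(&req, NULL) < 0)
		BT_ERR("request failed");
}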
3404
9be0dab7
DH
3405/* Alloc HCI device */
3406struct hci_dev *hci_alloc_dev(void)
3407{
3408 struct hci_dev *hdev;
3409
3410 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3411 if (!hdev)
3412 return NULL;
3413
b1b813d4
DH
3414 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3415 hdev->esco_type = (ESCO_HV1);
3416 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3417 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3418 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3419 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3420 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3421
b1b813d4
DH
3422 hdev->sniff_max_interval = 800;
3423 hdev->sniff_min_interval = 80;
3424
3f959d46 3425 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3426 hdev->le_scan_interval = 0x0060;
3427 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3428 hdev->le_conn_min_interval = 0x0028;
3429 hdev->le_conn_max_interval = 0x0038;
bef64738 3430
d6bfd59c
JH
3431 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3432
b1b813d4
DH
3433 mutex_init(&hdev->lock);
3434 mutex_init(&hdev->req_lock);
3435
3436 INIT_LIST_HEAD(&hdev->mgmt_pending);
3437 INIT_LIST_HEAD(&hdev->blacklist);
3438 INIT_LIST_HEAD(&hdev->uuids);
3439 INIT_LIST_HEAD(&hdev->link_keys);
3440 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3441 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3442 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3443 INIT_LIST_HEAD(&hdev->le_conn_params);
6b536b5e 3444 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3445
3446 INIT_WORK(&hdev->rx_work, hci_rx_work);
3447 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3448 INIT_WORK(&hdev->tx_work, hci_tx_work);
3449 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3450
b1b813d4
DH
3451 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3452 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3453 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3454
b1b813d4
DH
3455 skb_queue_head_init(&hdev->rx_q);
3456 skb_queue_head_init(&hdev->cmd_q);
3457 skb_queue_head_init(&hdev->raw_q);
3458
3459 init_waitqueue_head(&hdev->req_wait_q);
3460
bda4f23a 3461 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3462
b1b813d4
DH
3463 hci_init_sysfs(hdev);
3464 discovery_init(hdev);
9be0dab7
DH
3465
3466 return hdev;
3467}
3468EXPORT_SYMBOL(hci_alloc_dev);
3469
3470/* Free HCI device */
3471void hci_free_dev(struct hci_dev *hdev)
3472{
9be0dab7
DH
3473 /* will free via device release */
3474 put_device(&hdev->dev);
3475}
3476EXPORT_SYMBOL(hci_free_dev);
3477
1da177e4
LT
3478/* Register HCI device */
3479int hci_register_dev(struct hci_dev *hdev)
3480{
b1b813d4 3481 int id, error;
1da177e4 3482
010666a1 3483 if (!hdev->open || !hdev->close)
1da177e4
LT
3484 return -EINVAL;
3485
08add513
MM
3486 /* Do not allow HCI_AMP devices to register at index 0,
3487 * so the index can be used as the AMP controller ID.
3488 */
3df92b31
SL
3489 switch (hdev->dev_type) {
3490 case HCI_BREDR:
3491 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3492 break;
3493 case HCI_AMP:
3494 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3495 break;
3496 default:
3497 return -EINVAL;
1da177e4 3498 }
8e87d142 3499
3df92b31
SL
3500 if (id < 0)
3501 return id;
3502
1da177e4
LT
3503 sprintf(hdev->name, "hci%d", id);
3504 hdev->id = id;
2d8b3a11
AE
3505
3506 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3507
d8537548
KC
3508 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3509 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3510 if (!hdev->workqueue) {
3511 error = -ENOMEM;
3512 goto err;
3513 }
f48fd9c8 3514
d8537548
KC
3515 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3516 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3517 if (!hdev->req_workqueue) {
3518 destroy_workqueue(hdev->workqueue);
3519 error = -ENOMEM;
3520 goto err;
3521 }
3522
0153e2ec
MH
3523 if (!IS_ERR_OR_NULL(bt_debugfs))
3524 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3525
bdc3e0f1
MH
3526 dev_set_name(&hdev->dev, "%s", hdev->name);
3527
99780a7b
JH
3528 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3529 CRYPTO_ALG_ASYNC);
3530 if (IS_ERR(hdev->tfm_aes)) {
3531 BT_ERR("Unable to create crypto context");
3532 error = PTR_ERR(hdev->tfm_aes);
3533 hdev->tfm_aes = NULL;
3534 goto err_wqueue;
3535 }
3536
bdc3e0f1 3537 error = device_add(&hdev->dev);
33ca954d 3538 if (error < 0)
99780a7b 3539 goto err_tfm;
1da177e4 3540
611b30f7 3541 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3542 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3543 hdev);
611b30f7
MH
3544 if (hdev->rfkill) {
3545 if (rfkill_register(hdev->rfkill) < 0) {
3546 rfkill_destroy(hdev->rfkill);
3547 hdev->rfkill = NULL;
3548 }
3549 }
3550
5e130367
JH
3551 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3552 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3553
a8b2d5c2 3554 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3555 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3556
01cd3404 3557 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3558 /* Assume BR/EDR support until proven otherwise (such as
3559 * through reading supported features during init).
3560 */
3561 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3562 }
ce2be9ac 3563
fcee3377
GP
3564 write_lock(&hci_dev_list_lock);
3565 list_add(&hdev->list, &hci_dev_list);
3566 write_unlock(&hci_dev_list_lock);
3567
1da177e4 3568 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3569 hci_dev_hold(hdev);
1da177e4 3570
19202573 3571 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3572
1da177e4 3573 return id;
f48fd9c8 3574
99780a7b
JH
3575err_tfm:
3576 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3577err_wqueue:
3578 destroy_workqueue(hdev->workqueue);
6ead1bbc 3579 destroy_workqueue(hdev->req_workqueue);
33ca954d 3580err:
3df92b31 3581 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3582
33ca954d 3583 return error;
1da177e4
LT
3584}
3585EXPORT_SYMBOL(hci_register_dev);
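
/* Hedged driver-side sketch (hypothetical transport callbacks, not part of
 * this file): a transport allocates the device, wires up its hooks and
 * registers it, matching the open/close/send contract checked above.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb); /* a real driver hands the skb to its hardware */
	return 0;
}

static int __maybe_unused example_register(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}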
3586
3587/* Unregister HCI device */
59735631 3588void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3589{
3df92b31 3590 int i, id;
ef222013 3591
c13854ce 3592 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3593
94324962
JH
3594 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3595
3df92b31
SL
3596 id = hdev->id;
3597
f20d09d5 3598 write_lock(&hci_dev_list_lock);
1da177e4 3599 list_del(&hdev->list);
f20d09d5 3600 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3601
3602 hci_dev_do_close(hdev);
3603
cd4c5391 3604 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3605 kfree_skb(hdev->reassembly[i]);
3606
b9b5ef18
GP
3607 cancel_work_sync(&hdev->power_on);
3608
ab81cbf9 3609 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3610 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3611 hci_dev_lock(hdev);
744cf19e 3612 mgmt_index_removed(hdev);
09fd0de5 3613 hci_dev_unlock(hdev);
56e5cb86 3614 }
ab81cbf9 3615
2e58ef3e
JH
3616 /* mgmt_index_removed should take care of emptying the
3617 * pending list */
3618 BUG_ON(!list_empty(&hdev->mgmt_pending));
3619
1da177e4
LT
3620 hci_notify(hdev, HCI_DEV_UNREG);
3621
611b30f7
MH
3622 if (hdev->rfkill) {
3623 rfkill_unregister(hdev->rfkill);
3624 rfkill_destroy(hdev->rfkill);
3625 }
3626
99780a7b
JH
3627 if (hdev->tfm_aes)
3628 crypto_free_blkcipher(hdev->tfm_aes);
3629
bdc3e0f1 3630 device_del(&hdev->dev);
147e2d59 3631
0153e2ec
MH
3632 debugfs_remove_recursive(hdev->debugfs);
3633
f48fd9c8 3634 destroy_workqueue(hdev->workqueue);
6ead1bbc 3635 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3636
09fd0de5 3637 hci_dev_lock(hdev);
e2e0cacb 3638 hci_blacklist_clear(hdev);
2aeb9a1a 3639 hci_uuids_clear(hdev);
55ed8ca1 3640 hci_link_keys_clear(hdev);
b899efaf 3641 hci_smp_ltks_clear(hdev);
970c4e46 3642 hci_smp_irks_clear(hdev);
2763eda6 3643 hci_remote_oob_data_clear(hdev);
15819a70 3644 hci_conn_params_clear(hdev);
09fd0de5 3645 hci_dev_unlock(hdev);
e2e0cacb 3646
dc946bd8 3647 hci_dev_put(hdev);
3df92b31
SL
3648
3649 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3650}
3651EXPORT_SYMBOL(hci_unregister_dev);
3652
3653/* Suspend HCI device */
3654int hci_suspend_dev(struct hci_dev *hdev)
3655{
3656 hci_notify(hdev, HCI_DEV_SUSPEND);
3657 return 0;
3658}
3659EXPORT_SYMBOL(hci_suspend_dev);
3660
3661/* Resume HCI device */
3662int hci_resume_dev(struct hci_dev *hdev)
3663{
3664 hci_notify(hdev, HCI_DEV_RESUME);
3665 return 0;
3666}
3667EXPORT_SYMBOL(hci_resume_dev);
3668
76bca880 3669/* Receive frame from HCI drivers */
e1a26170 3670int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3671{
76bca880 3672 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3673 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3674 kfree_skb(skb);
3675 return -ENXIO;
3676 }
3677
d82603c6 3678 /* Incoming skb */
76bca880
MH
3679 bt_cb(skb)->incoming = 1;
3680
3681 /* Time stamp */
3682 __net_timestamp(skb);
3683
76bca880 3684 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3685 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3686
76bca880
MH
3687 return 0;
3688}
3689EXPORT_SYMBOL(hci_recv_frame);
3690
33e882a5 3691static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3692 int count, __u8 index)
33e882a5
SS
3693{
3694 int len = 0;
3695 int hlen = 0;
3696 int remain = count;
3697 struct sk_buff *skb;
3698 struct bt_skb_cb *scb;
3699
3700 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3701 index >= NUM_REASSEMBLY)
33e882a5
SS
3702 return -EILSEQ;
3703
3704 skb = hdev->reassembly[index];
3705
3706 if (!skb) {
3707 switch (type) {
3708 case HCI_ACLDATA_PKT:
3709 len = HCI_MAX_FRAME_SIZE;
3710 hlen = HCI_ACL_HDR_SIZE;
3711 break;
3712 case HCI_EVENT_PKT:
3713 len = HCI_MAX_EVENT_SIZE;
3714 hlen = HCI_EVENT_HDR_SIZE;
3715 break;
3716 case HCI_SCODATA_PKT:
3717 len = HCI_MAX_SCO_SIZE;
3718 hlen = HCI_SCO_HDR_SIZE;
3719 break;
3720 }
3721
1e429f38 3722 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3723 if (!skb)
3724 return -ENOMEM;
3725
3726 scb = (void *) skb->cb;
3727 scb->expect = hlen;
3728 scb->pkt_type = type;
3729
33e882a5
SS
3730 hdev->reassembly[index] = skb;
3731 }
3732
3733 while (count) {
3734 scb = (void *) skb->cb;
89bb46d0 3735 len = min_t(uint, scb->expect, count);
33e882a5
SS
3736
3737 memcpy(skb_put(skb, len), data, len);
3738
3739 count -= len;
3740 data += len;
3741 scb->expect -= len;
3742 remain = count;
3743
3744 switch (type) {
3745 case HCI_EVENT_PKT:
3746 if (skb->len == HCI_EVENT_HDR_SIZE) {
3747 struct hci_event_hdr *h = hci_event_hdr(skb);
3748 scb->expect = h->plen;
3749
3750 if (skb_tailroom(skb) < scb->expect) {
3751 kfree_skb(skb);
3752 hdev->reassembly[index] = NULL;
3753 return -ENOMEM;
3754 }
3755 }
3756 break;
3757
3758 case HCI_ACLDATA_PKT:
3759 if (skb->len == HCI_ACL_HDR_SIZE) {
3760 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3761 scb->expect = __le16_to_cpu(h->dlen);
3762
3763 if (skb_tailroom(skb) < scb->expect) {
3764 kfree_skb(skb);
3765 hdev->reassembly[index] = NULL;
3766 return -ENOMEM;
3767 }
3768 }
3769 break;
3770
3771 case HCI_SCODATA_PKT:
3772 if (skb->len == HCI_SCO_HDR_SIZE) {
3773 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3774 scb->expect = h->dlen;
3775
3776 if (skb_tailroom(skb) < scb->expect) {
3777 kfree_skb(skb);
3778 hdev->reassembly[index] = NULL;
3779 return -ENOMEM;
3780 }
3781 }
3782 break;
3783 }
3784
3785 if (scb->expect == 0) {
3786 /* Complete frame */
3787
3788 bt_cb(skb)->pkt_type = type;
e1a26170 3789 hci_recv_frame(hdev, skb);
33e882a5
SS
3790
3791 hdev->reassembly[index] = NULL;
3792 return remain;
3793 }
3794 }
3795
3796 return remain;
3797}
3798
ef222013
MH
3799int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3800{
f39a3c06
SS
3801 int rem = 0;
3802
ef222013
MH
3803 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3804 return -EILSEQ;
3805
da5f6c37 3806 while (count) {
1e429f38 3807 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3808 if (rem < 0)
3809 return rem;
ef222013 3810
f39a3c06
SS
3811 data += (count - rem);
3812 count = rem;
f81c6224 3813 }
ef222013 3814
f39a3c06 3815 return rem;
ef222013
MH
3816}
3817EXPORT_SYMBOL(hci_recv_fragment);
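
/* Transport-side sketch (assumed byte-stream driver, not part of this
 * file): each chunk read from the hardware is fed to the reassembler with
 * its packet type until a complete frame is delivered upward.
 */
static void __maybe_unused example_feed_event_bytes(struct hci_dev *hdev,
						    void *buf, int len)
{
	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
		BT_ERR("%s corrupted event packet", hdev->name);
}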
3818
99811510
SS
3819#define STREAM_REASSEMBLY 0
3820
3821int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3822{
3823 int type;
3824 int rem = 0;
3825
da5f6c37 3826 while (count) {
99811510
SS
3827 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3828
3829 if (!skb) {
3830 struct { char type; } *pkt;
3831
3832 /* Start of the frame */
3833 pkt = data;
3834 type = pkt->type;
3835
3836 data++;
3837 count--;
3838 } else
3839 type = bt_cb(skb)->pkt_type;
3840
1e429f38 3841 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3842 STREAM_REASSEMBLY);
99811510
SS
3843 if (rem < 0)
3844 return rem;
3845
3846 data += (count - rem);
3847 count = rem;
f81c6224 3848 }
99811510
SS
3849
3850 return rem;
3851}
3852EXPORT_SYMBOL(hci_recv_stream_fragment);
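
/* Editor's sketch: a UART-style driver, where the packet type indicator
 * travels in-band as the first byte of every frame, pushes the raw byte
 * stream here instead. my_uart_rx() is hypothetical.
 */
static void my_uart_rx(struct hci_dev *hdev, void *buf, int count)
{
	if (hci_recv_stream_fragment(hdev, buf, count) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}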
3853
1da177e4
LT
3854/* ---- Interface to upper protocols ---- */
3855
1da177e4
LT
3856int hci_register_cb(struct hci_cb *cb)
3857{
3858 BT_DBG("%p name %s", cb, cb->name);
3859
f20d09d5 3860 write_lock(&hci_cb_list_lock);
1da177e4 3861 list_add(&cb->list, &hci_cb_list);
f20d09d5 3862 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3863
3864 return 0;
3865}
3866EXPORT_SYMBOL(hci_register_cb);
3867
3868int hci_unregister_cb(struct hci_cb *cb)
3869{
3870 BT_DBG("%p name %s", cb, cb->name);
3871
f20d09d5 3872 write_lock(&hci_cb_list_lock);
1da177e4 3873 list_del(&cb->list);
f20d09d5 3874 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3875
3876 return 0;
3877}
3878EXPORT_SYMBOL(hci_unregister_cb);
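
/* Editor's sketch: an upper protocol registers a struct hci_cb to be
 * notified of link-level events; the optional confirm hooks are left
 * unset and "my_proto" is a hypothetical name. Call
 * hci_register_cb(&my_proto_cb) on module init and
 * hci_unregister_cb(&my_proto_cb) on module exit.
 */
static struct hci_cb my_proto_cb = {
	.name = "my_proto",
};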
3879
51086991 3880static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3881{
0d48d939 3882 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3883
cd82e61c
MH
3884 /* Time stamp */
3885 __net_timestamp(skb);
1da177e4 3886
cd82e61c
MH
3887 /* Send copy to monitor */
3888 hci_send_to_monitor(hdev, skb);
3889
3890 if (atomic_read(&hdev->promisc)) {
3891 /* Send copy to the sockets */
470fe1b5 3892 hci_send_to_sock(hdev, skb);
1da177e4
LT
3893 }
3894
3895 /* Get rid of skb owner, prior to sending to the driver. */
3896 skb_orphan(skb);
3897
7bd8f09f 3898 if (hdev->send(hdev, skb) < 0)
51086991 3899 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
3900}
3901
3119ae95
JH
3902void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3903{
3904 skb_queue_head_init(&req->cmd_q);
3905 req->hdev = hdev;
5d73e034 3906 req->err = 0;
3119ae95
JH
3907}
3908
3909int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3910{
3911 struct hci_dev *hdev = req->hdev;
3912 struct sk_buff *skb;
3913 unsigned long flags;
3914
3915 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3916
5d73e034
AG
3917 /* If an error occurred during request building, remove all HCI
3918 * commands queued on the HCI request queue.
3919 */
3920 if (req->err) {
3921 skb_queue_purge(&req->cmd_q);
3922 return req->err;
3923 }
3924
3119ae95
JH
3925 /* Do not allow empty requests */
3926 if (skb_queue_empty(&req->cmd_q))
382b0c39 3927 return -ENODATA;
3119ae95
JH
3928
3929 skb = skb_peek_tail(&req->cmd_q);
3930 bt_cb(skb)->req.complete = complete;
3931
3932 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3933 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3934 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3935
3936 queue_work(hdev->workqueue, &hdev->cmd_work);
3937
3938 return 0;
3939}
3940
1ca3a9d0 3941static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3942 u32 plen, const void *param)
1da177e4
LT
3943{
3944 int len = HCI_COMMAND_HDR_SIZE + plen;
3945 struct hci_command_hdr *hdr;
3946 struct sk_buff *skb;
3947
1da177e4 3948 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3949 if (!skb)
3950 return NULL;
1da177e4
LT
3951
3952 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3953 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3954 hdr->plen = plen;
3955
3956 if (plen)
3957 memcpy(skb_put(skb, plen), param, plen);
3958
3959 BT_DBG("skb len %d", skb->len);
3960
0d48d939 3961 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3962
1ca3a9d0
JH
3963 return skb;
3964}
3965
3966/* Send HCI command */
07dc93dd
JH
3967int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3968 const void *param)
1ca3a9d0
JH
3969{
3970 struct sk_buff *skb;
3971
3972 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3973
3974 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3975 if (!skb) {
3976 BT_ERR("%s no memory for command", hdev->name);
3977 return -ENOMEM;
3978 }
3979
11714b3d
JH
3980 /* Stand-alone HCI commands must be flagged as
3981 * single-command requests.
3982 */
3983 bt_cb(skb)->req.start = true;
3984
1da177e4 3985 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3986 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3987
3988 return 0;
3989}
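
/* Editor's sketch: sending a parameterless command and one carrying a
 * one-byte parameter. Both opcodes and the SCAN_* bits are standard
 * definitions from hci.h; my_example_cmds() itself is hypothetical.
 */
static void my_example_cmds(struct hci_dev *hdev)
{
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}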
1da177e4 3990
71c76a17 3991/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3992void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3993 const void *param, u8 event)
71c76a17
JH
3994{
3995 struct hci_dev *hdev = req->hdev;
3996 struct sk_buff *skb;
3997
3998 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3999
34739c1e
AG
4000 /* If an error occurred during request building, there is no point in
4001 * queueing the HCI command. We can simply return.
4002 */
4003 if (req->err)
4004 return;
4005
71c76a17
JH
4006 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4007 if (!skb) {
5d73e034
AG
4008 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4009 hdev->name, opcode);
4010 req->err = -ENOMEM;
e348fe6b 4011 return;
71c76a17
JH
4012 }
4013
4014 if (skb_queue_empty(&req->cmd_q))
4015 bt_cb(skb)->req.start = true;
4016
02350a72
JH
4017 bt_cb(skb)->req.event = event;
4018
71c76a17 4019 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4020}
4021
07dc93dd
JH
4022void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4023 const void *param)
02350a72
JH
4024{
4025 hci_req_add_ev(req, opcode, plen, param, 0);
4026}
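
/* Editor's sketch: batching commands with the request API. The single
 * completion callback runs once the last command in the request has
 * completed. my_req_complete() and my_read_version() are hypothetical.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int my_read_version(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	return hci_req_run(&req, my_req_complete);
}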
4027
1da177e4 4028/* Get data from the previously sent command */
a9de9248 4029void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4030{
4031 struct hci_command_hdr *hdr;
4032
4033 if (!hdev->sent_cmd)
4034 return NULL;
4035
4036 hdr = (void *) hdev->sent_cmd->data;
4037
a9de9248 4038 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4039 return NULL;
4040
f0e09510 4041 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4042
4043 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4044}
4045
4046/* Send ACL data */
4047static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4048{
4049 struct hci_acl_hdr *hdr;
4050 int len = skb->len;
4051
badff6d0
ACM
4052 skb_push(skb, HCI_ACL_HDR_SIZE);
4053 skb_reset_transport_header(skb);
9c70220b 4054 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4055 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4056 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4057}
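
/* Editor's note: hci_handle_pack() folds the 12-bit connection handle
 * and the 4-bit packet boundary/broadcast flags into a single __u16;
 * e.g. handle 0x002a combined with ACL_START (0x02) packs to 0x202a.
 */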
4058
ee22be7e 4059static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4060 struct sk_buff *skb, __u16 flags)
1da177e4 4061{
ee22be7e 4062 struct hci_conn *conn = chan->conn;
1da177e4
LT
4063 struct hci_dev *hdev = conn->hdev;
4064 struct sk_buff *list;
4065
087bfd99
GP
4066 skb->len = skb_headlen(skb);
4067 skb->data_len = 0;
4068
4069 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4070
4071 switch (hdev->dev_type) {
4072 case HCI_BREDR:
4073 hci_add_acl_hdr(skb, conn->handle, flags);
4074 break;
4075 case HCI_AMP:
4076 hci_add_acl_hdr(skb, chan->handle, flags);
4077 break;
4078 default:
4079 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4080 return;
4081 }
087bfd99 4082
70f23020
AE
4083 list = skb_shinfo(skb)->frag_list;
4084 if (!list) {
1da177e4
LT
4085 /* Non-fragmented */
4086 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4087
73d80deb 4088 skb_queue_tail(queue, skb);
1da177e4
LT
4089 } else {
4090 /* Fragmented */
4091 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4092
4093 skb_shinfo(skb)->frag_list = NULL;
4094
4095 /* Queue all fragments atomically */
af3e6359 4096 spin_lock(&queue->lock);
1da177e4 4097
73d80deb 4098 __skb_queue_tail(queue, skb);
e702112f
AE
4099
4100 flags &= ~ACL_START;
4101 flags |= ACL_CONT;
1da177e4
LT
4102 do {
4103 skb = list; list = list->next;
8e87d142 4104
0d48d939 4105 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4106 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4107
4108 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4109
73d80deb 4110 __skb_queue_tail(queue, skb);
1da177e4
LT
4111 } while (list);
4112
af3e6359 4113 spin_unlock(&queue->lock);
1da177e4 4114 }
73d80deb
LAD
4115}
4116
4117void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4118{
ee22be7e 4119 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4120
f0e09510 4121 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4122
ee22be7e 4123 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4124
3eff45ea 4125 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4126}
1da177e4
LT
4127
4128/* Send SCO data */
0d861d8b 4129void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4130{
4131 struct hci_dev *hdev = conn->hdev;
4132 struct hci_sco_hdr hdr;
4133
4134 BT_DBG("%s len %d", hdev->name, skb->len);
4135
aca3192c 4136 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4137 hdr.dlen = skb->len;
4138
badff6d0
ACM
4139 skb_push(skb, HCI_SCO_HDR_SIZE);
4140 skb_reset_transport_header(skb);
9c70220b 4141 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4142
0d48d939 4143 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4144
1da177e4 4145 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4146 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4147}
1da177e4
LT
4148
4149/* ---- HCI TX task (outgoing data) ---- */
4150
4151/* HCI Connection scheduler */
6039aa73
GP
4152static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4153 int *quote)
1da177e4
LT
4154{
4155 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4156 struct hci_conn *conn = NULL, *c;
abc5de8f 4157 unsigned int num = 0, min = ~0;
1da177e4 4158
8e87d142 4159 /* We don't have to lock the device here. Connections are always
1da177e4 4160 * added and removed with TX task disabled. */
bf4c6325
GP
4161
4162 rcu_read_lock();
4163
4164 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4165 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4166 continue;
769be974
MH
4167
4168 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4169 continue;
4170
1da177e4
LT
4171 num++;
4172
4173 if (c->sent < min) {
4174 min = c->sent;
4175 conn = c;
4176 }
52087a79
LAD
4177
4178 if (hci_conn_num(hdev, type) == num)
4179 break;
1da177e4
LT
4180 }
4181
bf4c6325
GP
4182 rcu_read_unlock();
4183
1da177e4 4184 if (conn) {
6ed58ec5
VT
4185 int cnt, q;
4186
4187 switch (conn->type) {
4188 case ACL_LINK:
4189 cnt = hdev->acl_cnt;
4190 break;
4191 case SCO_LINK:
4192 case ESCO_LINK:
4193 cnt = hdev->sco_cnt;
4194 break;
4195 case LE_LINK:
4196 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4197 break;
4198 default:
4199 cnt = 0;
4200 BT_ERR("Unknown link type");
4201 }
4202
4203 q = cnt / num;
1da177e4
LT
4204 *quote = q ? q : 1;
4205 } else
4206 *quote = 0;
4207
4208 BT_DBG("conn %p quote %d", conn, *quote);
4209 return conn;
4210}
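
/* Editor's note: the quote is the controller's free packet count split
 * evenly across busy connections of the given type; e.g. an acl_cnt of
 * 8 with two busy ACL links gives each a quote of 4 (minimum 1).
 */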
4211
6039aa73 4212static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4213{
4214 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4215 struct hci_conn *c;
1da177e4 4216
bae1f5d9 4217 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4218
bf4c6325
GP
4219 rcu_read_lock();
4220
1da177e4 4221 /* Kill stalled connections */
bf4c6325 4222 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4223 if (c->type == type && c->sent) {
6ed93dc6
AE
4224 BT_ERR("%s killing stalled connection %pMR",
4225 hdev->name, &c->dst);
bed71748 4226 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4227 }
4228 }
bf4c6325
GP
4229
4230 rcu_read_unlock();
1da177e4
LT
4231}
4232
6039aa73
GP
4233static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4234 int *quote)
1da177e4 4235{
73d80deb
LAD
4236 struct hci_conn_hash *h = &hdev->conn_hash;
4237 struct hci_chan *chan = NULL;
abc5de8f 4238 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4239 struct hci_conn *conn;
73d80deb
LAD
4240 int cnt, q, conn_num = 0;
4241
4242 BT_DBG("%s", hdev->name);
4243
bf4c6325
GP
4244 rcu_read_lock();
4245
4246 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4247 struct hci_chan *tmp;
4248
4249 if (conn->type != type)
4250 continue;
4251
4252 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4253 continue;
4254
4255 conn_num++;
4256
8192edef 4257 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4258 struct sk_buff *skb;
4259
4260 if (skb_queue_empty(&tmp->data_q))
4261 continue;
4262
4263 skb = skb_peek(&tmp->data_q);
4264 if (skb->priority < cur_prio)
4265 continue;
4266
4267 if (skb->priority > cur_prio) {
4268 num = 0;
4269 min = ~0;
4270 cur_prio = skb->priority;
4271 }
4272
4273 num++;
4274
4275 if (conn->sent < min) {
4276 min = conn->sent;
4277 chan = tmp;
4278 }
4279 }
4280
4281 if (hci_conn_num(hdev, type) == conn_num)
4282 break;
4283 }
4284
bf4c6325
GP
4285 rcu_read_unlock();
4286
73d80deb
LAD
4287 if (!chan)
4288 return NULL;
4289
4290 switch (chan->conn->type) {
4291 case ACL_LINK:
4292 cnt = hdev->acl_cnt;
4293 break;
bd1eb66b
AE
4294 case AMP_LINK:
4295 cnt = hdev->block_cnt;
4296 break;
73d80deb
LAD
4297 case SCO_LINK:
4298 case ESCO_LINK:
4299 cnt = hdev->sco_cnt;
4300 break;
4301 case LE_LINK:
4302 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4303 break;
4304 default:
4305 cnt = 0;
4306 BT_ERR("Unknown link type");
4307 }
4308
4309 q = cnt / num;
4310 *quote = q ? q : 1;
4311 BT_DBG("chan %p quote %d", chan, *quote);
4312 return chan;
4313}
4314
02b20f0b
LAD
4315static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4316{
4317 struct hci_conn_hash *h = &hdev->conn_hash;
4318 struct hci_conn *conn;
4319 int num = 0;
4320
4321 BT_DBG("%s", hdev->name);
4322
bf4c6325
GP
4323 rcu_read_lock();
4324
4325 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4326 struct hci_chan *chan;
4327
4328 if (conn->type != type)
4329 continue;
4330
4331 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4332 continue;
4333
4334 num++;
4335
8192edef 4336 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4337 struct sk_buff *skb;
4338
4339 if (chan->sent) {
4340 chan->sent = 0;
4341 continue;
4342 }
4343
4344 if (skb_queue_empty(&chan->data_q))
4345 continue;
4346
4347 skb = skb_peek(&chan->data_q);
4348 if (skb->priority >= HCI_PRIO_MAX - 1)
4349 continue;
4350
4351 skb->priority = HCI_PRIO_MAX - 1;
4352
4353 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4354 skb->priority);
02b20f0b
LAD
4355 }
4356
4357 if (hci_conn_num(hdev, type) == num)
4358 break;
4359 }
bf4c6325
GP
4360
4361 rcu_read_unlock();
4362
02b20f0b
LAD
4363}
4364
b71d385a
AE
4365static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4366{
4367 /* Calculate count of blocks used by this packet */
4368 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4369}
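
/* Editor's note: with a hypothetical block_len of 64, a 260-byte skb
 * (4-byte ACL header plus 256 bytes of payload) occupies
 * DIV_ROUND_UP(256, 64) = 4 controller data blocks.
 */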
4370
6039aa73 4371static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4372{
1da177e4
LT
4373 if (!test_bit(HCI_RAW, &hdev->flags)) {
4374 /* ACL tx timeout must be longer than maximum
4375 * link supervision timeout (40.9 seconds) */
63d2bc1b 4376 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4377 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4378 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4379 }
63d2bc1b 4380}
1da177e4 4381
6039aa73 4382static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4383{
4384 unsigned int cnt = hdev->acl_cnt;
4385 struct hci_chan *chan;
4386 struct sk_buff *skb;
4387 int quote;
4388
4389 __check_timeout(hdev, cnt);
04837f64 4390
73d80deb 4391 while (hdev->acl_cnt &&
a8c5fb1a 4392 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4393 u32 priority = (skb_peek(&chan->data_q))->priority;
4394 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4395 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4396 skb->len, skb->priority);
73d80deb 4397
ec1cce24
LAD
4398 /* Stop if priority has changed */
4399 if (skb->priority < priority)
4400 break;
4401
4402 skb = skb_dequeue(&chan->data_q);
4403
73d80deb 4404 hci_conn_enter_active_mode(chan->conn,
04124681 4405 bt_cb(skb)->force_active);
04837f64 4406
57d17d70 4407 hci_send_frame(hdev, skb);
1da177e4
LT
4408 hdev->acl_last_tx = jiffies;
4409
4410 hdev->acl_cnt--;
73d80deb
LAD
4411 chan->sent++;
4412 chan->conn->sent++;
1da177e4
LT
4413 }
4414 }
02b20f0b
LAD
4415
4416 if (cnt != hdev->acl_cnt)
4417 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4418}
4419
6039aa73 4420static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4421{
63d2bc1b 4422 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4423 struct hci_chan *chan;
4424 struct sk_buff *skb;
4425 int quote;
bd1eb66b 4426 u8 type;
b71d385a 4427
63d2bc1b 4428 __check_timeout(hdev, cnt);
b71d385a 4429
bd1eb66b
AE
4430 BT_DBG("%s", hdev->name);
4431
4432 if (hdev->dev_type == HCI_AMP)
4433 type = AMP_LINK;
4434 else
4435 type = ACL_LINK;
4436
b71d385a 4437 while (hdev->block_cnt > 0 &&
bd1eb66b 4438 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4439 u32 priority = (skb_peek(&chan->data_q))->priority;
4440 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4441 int blocks;
4442
4443 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4444 skb->len, skb->priority);
b71d385a
AE
4445
4446 /* Stop if priority has changed */
4447 if (skb->priority < priority)
4448 break;
4449
4450 skb = skb_dequeue(&chan->data_q);
4451
4452 blocks = __get_blocks(hdev, skb);
4453 if (blocks > hdev->block_cnt)
4454 return;
4455
4456 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4457 bt_cb(skb)->force_active);
b71d385a 4458
57d17d70 4459 hci_send_frame(hdev, skb);
b71d385a
AE
4460 hdev->acl_last_tx = jiffies;
4461
4462 hdev->block_cnt -= blocks;
4463 quote -= blocks;
4464
4465 chan->sent += blocks;
4466 chan->conn->sent += blocks;
4467 }
4468 }
4469
4470 if (cnt != hdev->block_cnt)
bd1eb66b 4471 hci_prio_recalculate(hdev, type);
b71d385a
AE
4472}
4473
6039aa73 4474static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4475{
4476 BT_DBG("%s", hdev->name);
4477
bd1eb66b
AE
4478 /* No ACL link over BR/EDR controller */
4479 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4480 return;
4481
4482 /* No AMP link over AMP controller */
4483 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4484 return;
4485
4486 switch (hdev->flow_ctl_mode) {
4487 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4488 hci_sched_acl_pkt(hdev);
4489 break;
4490
4491 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4492 hci_sched_acl_blk(hdev);
4493 break;
4494 }
4495}
4496
1da177e4 4497/* Schedule SCO */
6039aa73 4498static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4499{
4500 struct hci_conn *conn;
4501 struct sk_buff *skb;
4502 int quote;
4503
4504 BT_DBG("%s", hdev->name);
4505
52087a79
LAD
4506 if (!hci_conn_num(hdev, SCO_LINK))
4507 return;
4508
1da177e4
LT
4509 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4510 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4511 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4512 hci_send_frame(hdev, skb);
1da177e4
LT
4513
4514 conn->sent++;
4515 if (conn->sent == ~0)
4516 conn->sent = 0;
4517 }
4518 }
4519}
4520
6039aa73 4521static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4522{
4523 struct hci_conn *conn;
4524 struct sk_buff *skb;
4525 int quote;
4526
4527 BT_DBG("%s", hdev->name);
4528
52087a79
LAD
4529 if (!hci_conn_num(hdev, ESCO_LINK))
4530 return;
4531
8fc9ced3
GP
4532 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4533 &quote))) {
b6a0dc82
MH
4534 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4535 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4536 hci_send_frame(hdev, skb);
b6a0dc82
MH
4537
4538 conn->sent++;
4539 if (conn->sent == ~0)
4540 conn->sent = 0;
4541 }
4542 }
4543}
4544
6039aa73 4545static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4546{
73d80deb 4547 struct hci_chan *chan;
6ed58ec5 4548 struct sk_buff *skb;
02b20f0b 4549 int quote, cnt, tmp;
6ed58ec5
VT
4550
4551 BT_DBG("%s", hdev->name);
4552
52087a79
LAD
4553 if (!hci_conn_num(hdev, LE_LINK))
4554 return;
4555
6ed58ec5
VT
4556 if (!test_bit(HCI_RAW, &hdev->flags)) {
4557 /* LE tx timeout must be longer than maximum
4558 * link supervision timeout (40.9 seconds) */
bae1f5d9 4559 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4560 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4561 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4562 }
4563
4564 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4565 tmp = cnt;
73d80deb 4566 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4567 u32 priority = (skb_peek(&chan->data_q))->priority;
4568 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4569 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4570 skb->len, skb->priority);
6ed58ec5 4571
ec1cce24
LAD
4572 /* Stop if priority has changed */
4573 if (skb->priority < priority)
4574 break;
4575
4576 skb = skb_dequeue(&chan->data_q);
4577
57d17d70 4578 hci_send_frame(hdev, skb);
6ed58ec5
VT
4579 hdev->le_last_tx = jiffies;
4580
4581 cnt--;
73d80deb
LAD
4582 chan->sent++;
4583 chan->conn->sent++;
6ed58ec5
VT
4584 }
4585 }
73d80deb 4586
6ed58ec5
VT
4587 if (hdev->le_pkts)
4588 hdev->le_cnt = cnt;
4589 else
4590 hdev->acl_cnt = cnt;
02b20f0b
LAD
4591
4592 if (cnt != tmp)
4593 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4594}
4595
3eff45ea 4596static void hci_tx_work(struct work_struct *work)
1da177e4 4597{
3eff45ea 4598 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4599 struct sk_buff *skb;
4600
6ed58ec5 4601 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4602 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4603
52de599e
MH
4604 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4605 /* Schedule queues and send stuff to HCI driver */
4606 hci_sched_acl(hdev);
4607 hci_sched_sco(hdev);
4608 hci_sched_esco(hdev);
4609 hci_sched_le(hdev);
4610 }
6ed58ec5 4611
1da177e4
LT
4612 /* Send next queued raw (unknown type) packet */
4613 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4614 hci_send_frame(hdev, skb);
1da177e4
LT
4615}
4616
25985edc 4617/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4618
4619/* ACL data packet */
6039aa73 4620static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4621{
4622 struct hci_acl_hdr *hdr = (void *) skb->data;
4623 struct hci_conn *conn;
4624 __u16 handle, flags;
4625
4626 skb_pull(skb, HCI_ACL_HDR_SIZE);
4627
4628 handle = __le16_to_cpu(hdr->handle);
4629 flags = hci_flags(handle);
4630 handle = hci_handle(handle);
4631
f0e09510 4632 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4633 handle, flags);
1da177e4
LT
4634
4635 hdev->stat.acl_rx++;
4636
4637 hci_dev_lock(hdev);
4638 conn = hci_conn_hash_lookup_handle(hdev, handle);
4639 hci_dev_unlock(hdev);
8e87d142 4640
1da177e4 4641 if (conn) {
65983fc7 4642 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4643
1da177e4 4644 /* Send to upper protocol */
686ebf28
UF
4645 l2cap_recv_acldata(conn, skb, flags);
4646 return;
1da177e4 4647 } else {
8e87d142 4648 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4649 hdev->name, handle);
1da177e4
LT
4650 }
4651
4652 kfree_skb(skb);
4653}
4654
4655/* SCO data packet */
6039aa73 4656static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4657{
4658 struct hci_sco_hdr *hdr = (void *) skb->data;
4659 struct hci_conn *conn;
4660 __u16 handle;
4661
4662 skb_pull(skb, HCI_SCO_HDR_SIZE);
4663
4664 handle = __le16_to_cpu(hdr->handle);
4665
f0e09510 4666 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4667
4668 hdev->stat.sco_rx++;
4669
4670 hci_dev_lock(hdev);
4671 conn = hci_conn_hash_lookup_handle(hdev, handle);
4672 hci_dev_unlock(hdev);
4673
4674 if (conn) {
1da177e4 4675 /* Send to upper protocol */
686ebf28
UF
4676 sco_recv_scodata(conn, skb);
4677 return;
1da177e4 4678 } else {
8e87d142 4679 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4680 hdev->name, handle);
1da177e4
LT
4681 }
4682
4683 kfree_skb(skb);
4684}
4685
9238f36a
JH
4686static bool hci_req_is_complete(struct hci_dev *hdev)
4687{
4688 struct sk_buff *skb;
4689
4690 skb = skb_peek(&hdev->cmd_q);
4691 if (!skb)
4692 return true;
4693
4694 return bt_cb(skb)->req.start;
4695}
4696
42c6b129
JH
4697static void hci_resend_last(struct hci_dev *hdev)
4698{
4699 struct hci_command_hdr *sent;
4700 struct sk_buff *skb;
4701 u16 opcode;
4702
4703 if (!hdev->sent_cmd)
4704 return;
4705
4706 sent = (void *) hdev->sent_cmd->data;
4707 opcode = __le16_to_cpu(sent->opcode);
4708 if (opcode == HCI_OP_RESET)
4709 return;
4710
4711 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4712 if (!skb)
4713 return;
4714
4715 skb_queue_head(&hdev->cmd_q, skb);
4716 queue_work(hdev->workqueue, &hdev->cmd_work);
4717}
4718
9238f36a
JH
4719void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4720{
4721 hci_req_complete_t req_complete = NULL;
4722 struct sk_buff *skb;
4723 unsigned long flags;
4724
4725 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4726
42c6b129
JH
4727 /* If the completed command doesn't match the last one that was
4728 * sent we need to do special handling of it.
9238f36a 4729 */
42c6b129
JH
4730 if (!hci_sent_cmd_data(hdev, opcode)) {
4731 /* Some CSR-based controllers generate a spontaneous
4732 * reset complete event during init and any pending
4733 * command will never be completed. In such a case we
4734 * need to resend whatever was the last sent
4735 * command.
4736 */
4737 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4738 hci_resend_last(hdev);
4739
9238f36a 4740 return;
42c6b129 4741 }
9238f36a
JH
4742
4743 /* If the command succeeded and there are still more commands in
4744 * this request, the request is not yet complete.
4745 */
4746 if (!status && !hci_req_is_complete(hdev))
4747 return;
4748
4749 /* If this was the last command in a request, the complete
4750 * callback would be found in hdev->sent_cmd instead of the
4751 * command queue (hdev->cmd_q).
4752 */
4753 if (hdev->sent_cmd) {
4754 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
4755
4756 if (req_complete) {
4757 /* We must set the complete callback to NULL to
4758 * avoid calling the callback more than once if
4759 * this function gets called again.
4760 */
4761 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4762
9238f36a 4763 goto call_complete;
53e21fbc 4764 }
9238f36a
JH
4765 }
4766
4767 /* Remove all pending commands belonging to this request */
4768 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4769 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4770 if (bt_cb(skb)->req.start) {
4771 __skb_queue_head(&hdev->cmd_q, skb);
4772 break;
4773 }
4774
4775 req_complete = bt_cb(skb)->req.complete;
4776 kfree_skb(skb);
4777 }
4778 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4779
4780call_complete:
4781 if (req_complete)
4782 req_complete(hdev, status);
4783}
4784
b78752cc 4785static void hci_rx_work(struct work_struct *work)
1da177e4 4786{
b78752cc 4787 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4788 struct sk_buff *skb;
4789
4790 BT_DBG("%s", hdev->name);
4791
1da177e4 4792 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4793 /* Send copy to monitor */
4794 hci_send_to_monitor(hdev, skb);
4795
1da177e4
LT
4796 if (atomic_read(&hdev->promisc)) {
4797 /* Send copy to the sockets */
470fe1b5 4798 hci_send_to_sock(hdev, skb);
1da177e4
LT
4799 }
4800
0736cfa8
MH
4801 if (test_bit(HCI_RAW, &hdev->flags) ||
4802 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4803 kfree_skb(skb);
4804 continue;
4805 }
4806
4807 if (test_bit(HCI_INIT, &hdev->flags)) {
4808 /* Don't process data packets in this state. */
0d48d939 4809 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4810 case HCI_ACLDATA_PKT:
4811 case HCI_SCODATA_PKT:
4812 kfree_skb(skb);
4813 continue;
3ff50b79 4814 }
1da177e4
LT
4815 }
4816
4817 /* Process frame */
0d48d939 4818 switch (bt_cb(skb)->pkt_type) {
1da177e4 4819 case HCI_EVENT_PKT:
b78752cc 4820 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4821 hci_event_packet(hdev, skb);
4822 break;
4823
4824 case HCI_ACLDATA_PKT:
4825 BT_DBG("%s ACL data packet", hdev->name);
4826 hci_acldata_packet(hdev, skb);
4827 break;
4828
4829 case HCI_SCODATA_PKT:
4830 BT_DBG("%s SCO data packet", hdev->name);
4831 hci_scodata_packet(hdev, skb);
4832 break;
4833
4834 default:
4835 kfree_skb(skb);
4836 break;
4837 }
4838 }
1da177e4
LT
4839}
4840
c347b765 4841static void hci_cmd_work(struct work_struct *work)
1da177e4 4842{
c347b765 4843 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4844 struct sk_buff *skb;
4845
2104786b
AE
4846 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4847 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4848
1da177e4 4849 /* Send queued commands */
5a08ecce
AE
4850 if (atomic_read(&hdev->cmd_cnt)) {
4851 skb = skb_dequeue(&hdev->cmd_q);
4852 if (!skb)
4853 return;
4854
7585b97a 4855 kfree_skb(hdev->sent_cmd);
1da177e4 4856
a675d7f1 4857 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4858 if (hdev->sent_cmd) {
1da177e4 4859 atomic_dec(&hdev->cmd_cnt);
57d17d70 4860 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
4861 if (test_bit(HCI_RESET, &hdev->flags))
4862 del_timer(&hdev->cmd_timer);
4863 else
4864 mod_timer(&hdev->cmd_timer,
5f246e89 4865 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
4866 } else {
4867 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4868 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4869 }
4870 }
4871}
b1efcc28
AG
4872
4873void hci_req_add_le_scan_disable(struct hci_request *req)
4874{
4875 struct hci_cp_le_set_scan_enable cp;
4876
4877 memset(&cp, 0, sizeof(cp));
4878 cp.enable = LE_SCAN_DISABLE;
4879 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
4880}
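
/* Editor's sketch: disabling LE scanning through the request API using
 * the helper above. my_stop_scan() is hypothetical; passing NULL skips
 * the completion callback.
 */
static int my_stop_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	return hci_req_run(&req, NULL);
}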