/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

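/* Most of the boolean debugfs attributes below follow one pattern: the
 * read handler reports 'Y' or 'N' based on a bit in hdev->dev_flags,
 * and the write handler parses the input with strtobool() and flips
 * that bit, in some cases sending an HCI command first.
 */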
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

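/* Writing to dut_mode puts the controller into Device Under Test mode
 * via HCI_OP_ENABLE_DUT_MODE; leaving it requires a full HCI_OP_RESET.
 * The HCI_DUT_MODE flag is only toggled when the command's status byte
 * reports success.
 */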
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

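/* The identity debugfs entry shows the address used as LE identity:
 * the static random address when one is forced via debugfs or when no
 * public address is available, and the public BD_ADDR otherwise.
 */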
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

/* ---- HCI requests ---- */

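/* Completion callback shared by all synchronous requests: stash the
 * result and wake up the thread sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */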
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

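/* Take the most recently received HCI event out of hdev->recv_evt and
 * hand it to the caller, provided it matches the expected event code,
 * or the Command Complete event for the given opcode. On any mismatch
 * the skb is freed and -ENODATA is returned.
 */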
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

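/* Send a single HCI command and block until the matching event (by
 * default the Command Complete for the given opcode) arrives or the
 * timeout expires. Callers serialize against other requests by taking
 * hci_req_lock() around the call, as dut_mode_write() above does.
 */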
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

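/* Typical use of __hci_cmd_sync(), as in dut_mode_write() above: issue
 * a command and check the status byte of the returned event:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */
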
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

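/* Returns the inquiry mode to request from the controller: 0x02 for
 * extended inquiry results, 0x01 for results with RSSI and 0x00 for
 * standard results. The manufacturer/revision checks cover specific
 * controllers that handle RSSI results without advertising the
 * corresponding feature bit.
 */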
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * be available as well. However some controllers list
		 * the max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

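/* Re-insert an entry into the resolve list, keeping the list ordered
 * by signal strength (smallest |RSSI| first) so that the names of the
 * strongest devices are resolved first. Entries whose name resolution
 * is already pending keep their position at the front.
 */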
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

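/* Add a fresh inquiry result to the cache, or update the existing
 * entry for this bdaddr. Returns true if the remote name is already
 * known (so no remote name request is needed), false otherwise,
 * including when no new entry could be allocated.
 */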
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

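/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, the layout expected by the HCIINQUIRY ioctl.
 * Returns the number of entries copied. Must not sleep, since it runs
 * with hdev->lock held.
 */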
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

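/* Handler for the HCIINQUIRY ioctl: starts a new inquiry if the cache
 * is stale (or IREQ_CACHE_FLUSH was requested), waits for it to finish
 * and then copies the cached results back to user space.
 */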
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

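/* Core power-on path, shared by hci_dev_open() and the hci_power_on()
 * work. Runs the driver's setup callback on first open and the HCI
 * init sequence, and rolls everything back if initialization fails.
 */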
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

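/* Core power-off path: cancels pending work, flushes queues and
 * timers, optionally issues an HCI reset (HCI_QUIRK_RESET_ON_CLOSE)
 * and finally calls the driver's close callback.
 */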
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

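/* Handler for the HCISET* ioctls. Note that for HCISETACLMTU and
 * HCISETSCOMTU the 32-bit dev_opt value carries two packed __u16
 * fields: the first half is the packet count and the second half is
 * the MTU (host byte order).
 */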
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

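/* Resolve a Resolvable Private Address to its IRK. The first pass is a
 * cheap lookup against the last RPA seen for each key; only if that
 * fails is the AES based resolution attempted, and a match then
 * refreshes the cached RPA.
 */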
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

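/* Store (or update) a BR/EDR link key for @bdaddr. When @new_key is
 * set the key is reported to mgmt, with hci_persistent_key() deciding
 * whether user space should store it persistently.
 */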
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

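/* Add connection parameters for @addr, or update them if an entry
 * already exists. Depending on @auto_connect the device is also added
 * to or removed from the pending LE connection list, which in turn
 * drives the background scan.
 */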
/* This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u8 auto_connect, u16 conn_min_interval,
			 u16 conn_max_interval)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

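/* Completion handlers for the LE scan disable request. For interleaved
 * discovery the BR/EDR inquiry phase is kicked off here once LE
 * scanning has been stopped.
 */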
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

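/* Choose the own address type for a request, queueing an HCI command
 * to set a new random address when needed: a resolvable private
 * address when privacy is enabled, an unresolvable private address
 * when privacy is required without an RPA, the static address when no
 * public address is available, and the public address otherwise.
 */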
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

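/* Reassemble a full HCI packet of @type from a driver provided byte
 * stream. Partial packets are parked in hdev->reassembly[index]
 * between calls; the return value is the number of input bytes left
 * unconsumed, or a negative error.
 */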
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

ef222013
MH
3907int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3908{
f39a3c06
SS
3909 int rem = 0;
3910
ef222013
MH
3911 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3912 return -EILSEQ;
3913
da5f6c37 3914 while (count) {
1e429f38 3915 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3916 if (rem < 0)
3917 return rem;
ef222013 3918
f39a3c06
SS
3919 data += (count - rem);
3920 count = rem;
f81c6224 3921 }
ef222013 3922
f39a3c06 3923 return rem;
ef222013
MH
3924}
3925EXPORT_SYMBOL(hci_recv_fragment);
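/* Illustrative sketch (assumption): a transport that learns the packet
 * type out of band but receives the payload in arbitrary chunks can feed
 * each chunk to hci_recv_fragment(); the core buffers partial frames per
 * type until a full packet is assembled and delivered.
 */
static int example_feed_event_bytes(struct hci_dev *hdev, void *data,
				    int count)
{
	return hci_recv_fragment(hdev, HCI_EVENT_PKT, data, count);
}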
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
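/* Illustrative sketch (assumption): an H4-style UART driver passes its raw
 * receive buffer, including the leading packet-type octet of each frame,
 * straight to hci_recv_stream_fragment(). The example_uart type is
 * hypothetical.
 */
struct example_uart {
	struct hci_dev *hdev;
};

static void example_uart_rx(struct example_uart *u, void *data, int count)
{
	int err = hci_recv_stream_fragment(u->hdev, data, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", u->hdev->name, err);
}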
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
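/* Illustrative sketch (assumption): an upper protocol registers a
 * struct hci_cb at module init to receive connection notifications. The
 * security_cfm hook shown here mirrors how L2CAP and RFCOMM use this
 * interface at this point in time; the example_* names are made up.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status,
	       encrypt);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}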
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
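/* Illustrative sketch (assumption): building a minimal one-command
 * request and running it asynchronously. hci_req_add() is defined below;
 * the completion callback runs once, after the last command completes or
 * an earlier one fails.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name, status);
}

static int example_enable_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	return hci_req_run(&req, example_req_complete);
}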
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
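/* Illustrative sketch (assumption): sending a stand-alone command. Unlike
 * commands queued through a request, completion is observed via the normal
 * event processing in hci_event_packet() rather than a per-request callback.
 */
static int example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}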
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
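/* Worked example for the quota computed above (illustrative numbers):
 * with hdev->acl_cnt == 9 free ACL slots and num == 4 ACL connections
 * holding queued data, q = 9 / 4 = 2, so the least-busy connection may
 * send two packets this round. A zero quotient is rounded up to one so
 * that a connection can always make progress.
 */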
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
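/* Illustrative arithmetic (values assumed for the example): with
 * hdev->block_len == 339, a 1021-byte ACL packet has 1021 - 4 = 1017
 * bytes beyond the ACL header and therefore occupies
 * DIV_ROUND_UP(1017, 339) == 3 controller buffer blocks.
 */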
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		u8 own_addr_type;

		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is already scanning we are done. */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* Set require_privacy to true to avoid identification from
		 * unknown peer devices. Since this is passive scanning, no
		 * SCAN_REQ using the local identity should be sent. Mandating
		 * privacy is just an extra precaution.
		 */
		if (hci_update_random_address(&req, true, &own_addr_type))
			return;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = LE_SCAN_PASSIVE;
		param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
		param_cp.window = cpu_to_le16(hdev->le_scan_window);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
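/* Illustrative sketch (assumption): hci_update_background_scan() must be
 * called with hdev->lock held, typically right after the pending LE
 * connection list changes. hci_pend_le_conn_add() is assumed here to be
 * the helper that populates hdev->pend_le_conns.
 *
 *	hci_dev_lock(hdev);
 *	hci_pend_le_conn_add(hdev, &addr, addr_type);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */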