]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Wait for SMP key distribution completion when pairing
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
970c4e46
JH
38#include "smp.h"
39
b78752cc 40static void hci_rx_work(struct work_struct *work);
c347b765 41static void hci_cmd_work(struct work_struct *work);
3eff45ea 42static void hci_tx_work(struct work_struct *work);
1da177e4 43
1da177e4
LT
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
3df92b31
SL
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
1da177e4
LT
55/* ---- HCI notifications ---- */
56
/* Forward a device event to the HCI socket layer so monitoring
 * sockets see device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
baf27f6e
MH
62/* ---- HCI debugfs entries ---- */
63
4b4148e9
MH
64static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
/* debugfs write handler for "dut_mode": parses a boolean string and
 * enables or disables Device Under Test mode via a synchronous HCI
 * command. Returns the number of bytes consumed or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* The mode can only be changed while the controller is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	/* Entering DUT mode is an explicit command; it is left again by
	 * resetting the controller.
	 */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete response is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only flip the cached flag once the controller accepted it */
	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
128
dfb826a8
MH
/* debugfs "features": dump every supported LMP feature page and, for
 * LE capable controllers, the LE feature mask as well.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
166
70afe0b8
MH
/* debugfs "blacklist": list every blacklisted address and its
 * address type, one per line.
 */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
191
47219839
MH
/* debugfs "uuids": print every registered service UUID. */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
226
baf27f6e
MH
/* debugfs "inquiry_cache": dump the discovery cache, one line per
 * remote device with its inquiry result data and timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
262
02d08d15
MH
263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
/* seq_file boilerplate for the "link_keys" debugfs entry. */
static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
290
babdbb3c
MH
/* debugfs "dev_class": print the 3-byte Class of Device value,
 * most significant byte first.
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
314
041000b9
MH
/* debugfs "voice_setting": read-only view of the controller's voice
 * setting value (read side of a read-only simple attribute).
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
ebd1e33b
MH
/* debugfs "auto_accept_delay": get/set the cached auto-accept delay.
 * No range validation is performed on the written value.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353
06f5b778
MH
/* debugfs "ssp_debug_mode": toggle Simple Pairing debug mode on the
 * controller via a synchronous HCI command; the cached value is only
 * updated after the controller reports success.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete response is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
5afeac14
MH
/* debugfs "force_sc_support": report whether the Secure Connections
 * support override flag is set ('Y'/'N').
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Toggle the override flag. Note the inverted HCI_UP check compared to
 * dut_mode_write: this flag may only be changed while the controller
 * is DOWN, since it influences controller initialization.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};
448
134c2a89
MH
/* debugfs "sc_only_mode": read-only 'Y'/'N' view of the Secure
 * Connections Only mode flag (no write handler on purpose).
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};
466
2bfa3531
MH
/* debugfs "idle_timeout": get/set the connection idle timeout.
 * 0 disables the timeout; otherwise the value must be within
 * 500..3600000 (presumably milliseconds, i.e. 0.5 s to 1 h —
 * confirm against hci_conn timer usage).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494
/* debugfs "sniff_min_interval": get/set the minimum sniff interval.
 * The value must be non-zero, even, and not above the currently
 * configured maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
522
/* debugfs "sniff_max_interval": get/set the maximum sniff interval.
 * The value must be non-zero, even, and not below the currently
 * configured minimum (mirror constraint of sniff_min_interval_set).
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
e7b8fc92
MH
/* debugfs "static_address": print the configured LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
573
92202185
MH
/* debugfs "own_address_type": get/set the LE own address type.
 * Only 0 and 1 are accepted (presumably 0x00 = public, 0x01 = random
 * per the LE address type definitions — confirm against usage).
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
601
8f8625cd
MH
602static int long_term_keys_show(struct seq_file *f, void *ptr)
603{
604 struct hci_dev *hdev = f->private;
605 struct list_head *p, *n;
606
607 hci_dev_lock(hdev);
f813f1be 608 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 609 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
f813f1be 610 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
8f8625cd
MH
611 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
612 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
613 8, ltk->rand, 16, ltk->val);
614 }
615 hci_dev_unlock(hdev);
616
617 return 0;
618}
619
/* seq_file boilerplate for the "long_term_keys" debugfs entry. */
static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
631
4e70c7e7
MH
/* debugfs "conn_min_interval": get/set the LE connection interval
 * minimum. Accepted range is 0x0006..0x0c80 and the value may not
 * exceed the configured maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
659
/* debugfs "conn_max_interval": get/set the LE connection interval
 * maximum. Accepted range is 0x0006..0x0c80 and the value may not
 * fall below the configured minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
687
89863109
JR
/* debugfs "6lowpan": report whether 6LoWPAN support is enabled. */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Toggle the 6LoWPAN enable flag.
 *
 * NOTE(review): unlike dut_mode_write there is no HCI_UP check here,
 * and the strtobool() result is compared with "< 0" rather than the
 * truth-test used elsewhere in this file (equivalent, since strtobool
 * returns 0 or a negative errno, but inconsistent) — confirm intent.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};
730
1da177e4
LT
731/* ---- HCI requests ---- */
732
/* Completion callback for synchronous requests: record the result and
 * wake any waiter parked on req_wait_q (see __hci_req_sync).
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
743
/* Abort a pending synchronous request with the given (positive) errno
 * and wake the waiter; it will see HCI_REQ_CANCELED and return -err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
754
77a63e0a
FW
/* Take ownership of the last received event (hdev->recv_evt) and
 * validate it against the expected opcode/event.
 *
 * If @event is non-zero, the event merely has to match that event
 * code. Otherwise it must be a Command Complete event whose opcode
 * matches @opcode. On success the skb (with headers pulled) is
 * returned to the caller, who must free it; on any mismatch the skb
 * is freed here and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the lock; we now own the skb */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
809
/* Send a single HCI command and sleep until it completes, times out,
 * or a signal arrives. @event selects which event terminates the
 * command (0 means the normal Command Complete). Returns the
 * completion event skb (caller frees) or an ERR_PTR.
 *
 * NOTE: the wait queue must be set up before checking for completion;
 * the order add_wait_queue -> set_current_state -> schedule_timeout
 * mirrors __hci_req_sync and must not be changed.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	/* Map the request status (set by hci_req_sync_complete or
	 * hci_req_cancel) to an errno.
	 */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
863
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * of waiting for the ordinary Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
870
1da177e4 871/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @func builds the request (may queue zero or more commands); the
 * calling task then sleeps until hci_req_sync_complete() fires, the
 * request is canceled, the timeout expires, or a signal is delivered.
 * Must be called with the request lock held (see hci_req_sync).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Map the final request status to an errno */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
934
/* Locked wrapper around __hci_req_sync(): refuses to run when the
 * device is down and serializes all synchronous requests via the
 * request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
952
/* Request builder: queue an HCI_OP_RESET and flag the device as
 * resetting so the event path knows a reset is in flight.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
961
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads every controller must answer.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
975
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific info/capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
/* First controller init stage: optional reset, then dispatch to the
 * BR/EDR or AMP specific init sequence based on the device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1026
/* Queue the BR/EDR capability reads and baseline configuration used
 * during controller bring-up (buffer sizes, names, filters, timeouts,
 * page scan parameters for controllers that support them).
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1068
/* Queue the LE capability reads during bring-up and implicitly enable
 * LE on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
/* Queue a Write Inquiry Mode command with the best mode the
 * controller supports (see hci_get_inquiry_mode).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1129
/* Build and queue the HCI Set Event Mask command (and the LE event
 * mask when LE is supported), enabling only the events that match
 * the controller's advertised capabilities.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask: enable the
		 * first five LE meta events (bits 0-4).
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
/* Second stage of controller initialization: configure transport
 * state, event mask, SSP/EIR and authentication based on the
 * features reported during stage one.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP is disabled: clear any cached EIR data
			 * and write an empty EIR to the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		/* Read the host features page */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1274
42c6b129 1275static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1276{
42c6b129 1277 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
42c6b129 1291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1292}
1293
42c6b129 1294static void hci_set_le_support(struct hci_request *req)
2177bab5 1295{
42c6b129 1296 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1297 struct hci_cp_write_le_host_supported cp;
1298
c73eee91
JH
1299 /* LE-only devices do not support explicit enablement */
1300 if (!lmp_bredr_capable(hdev))
1301 return;
1302
2177bab5
JH
1303 memset(&cp, 0, sizeof(cp));
1304
1305 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1306 cp.le = 0x01;
1307 cp.simul = lmp_le_br_capable(hdev);
1308 }
1309
1310 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1311 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1312 &cp);
2177bab5
JH
1313}
1314
/* Build and queue the second page of the HCI Event Mask, enabling
 * CSB and secure-ping related events when the controller supports
 * the corresponding roles.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1346
/* Third stage of controller initialization: commands that depend on
 * the supported-commands bitmask and extended feature pages read in
 * earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		/* Delete all stored keys, for every address */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1403
/* Fourth and final stage of controller initialization: optional
 * features gated on the supported-commands bitmask and LMP features.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1425
/* Run the staged HCI init sequence for a newly opened controller and,
 * during initial setup only, create its debugfs entries. Returns 0 on
 * success or the first stage's negative error.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Simple Pairing / Secure Connections entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
42c6b129 1528static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1529{
1530 __u8 scan = opt;
1531
42c6b129 1532 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1533
1534 /* Inquiry and Page scans */
42c6b129 1535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1536}
1537
42c6b129 1538static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1539{
1540 __u8 auth = opt;
1541
42c6b129 1542 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1543
1544 /* Authentication */
42c6b129 1545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1546}
1547
42c6b129 1548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1549{
1550 __u8 encrypt = opt;
1551
42c6b129 1552 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1553
e4e8e37c 1554 /* Encryption */
42c6b129 1555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1556}
1557
42c6b129 1558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1559{
1560 __le16 policy = cpu_to_le16(opt);
1561
42c6b129 1562 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1563
1564 /* Default link policy */
42c6b129 1565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1566}
1567
8e87d142 1568/* Get HCI device by index.
1da177e4
LT
1569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
8035ded4 1572 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
8035ded4 1580 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
1da177e4
LT
1589
1590/* ---- Inquiry support ---- */
ff9ef578 1591
30dc78e1
JH
1592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
6fbe195d 1596 switch (discov->state) {
343f935b 1597 case DISCOVERY_FINDING:
6fbe195d 1598 case DISCOVERY_RESOLVING:
30dc78e1
JH
1599 return true;
1600
6fbe195d
AG
1601 default:
1602 return false;
1603 }
30dc78e1
JH
1604}
1605
ff9ef578
JH
1606void hci_discovery_set_state(struct hci_dev *hdev, int state)
1607{
1608 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1609
1610 if (hdev->discovery.state == state)
1611 return;
1612
1613 switch (state) {
1614 case DISCOVERY_STOPPED:
7b99b659
AG
1615 if (hdev->discovery.state != DISCOVERY_STARTING)
1616 mgmt_discovering(hdev, 0);
ff9ef578
JH
1617 break;
1618 case DISCOVERY_STARTING:
1619 break;
343f935b 1620 case DISCOVERY_FINDING:
ff9ef578
JH
1621 mgmt_discovering(hdev, 1);
1622 break;
30dc78e1
JH
1623 case DISCOVERY_RESOLVING:
1624 break;
ff9ef578
JH
1625 case DISCOVERY_STOPPING:
1626 break;
1627 }
1628
1629 hdev->discovery.state = state;
1630}
1631
1f9b9a5d 1632void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1633{
30883512 1634 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1635 struct inquiry_entry *p, *n;
1da177e4 1636
561aafbc
JH
1637 list_for_each_entry_safe(p, n, &cache->all, all) {
1638 list_del(&p->all);
b57c1a56 1639 kfree(p);
1da177e4 1640 }
561aafbc
JH
1641
1642 INIT_LIST_HEAD(&cache->unknown);
1643 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1644}
1645
a8c5fb1a
GP
1646struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1647 bdaddr_t *bdaddr)
1da177e4 1648{
30883512 1649 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1650 struct inquiry_entry *e;
1651
6ed93dc6 1652 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1653
561aafbc
JH
1654 list_for_each_entry(e, &cache->all, all) {
1655 if (!bacmp(&e->data.bdaddr, bdaddr))
1656 return e;
1657 }
1658
1659 return NULL;
1660}
1661
1662struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1663 bdaddr_t *bdaddr)
561aafbc 1664{
30883512 1665 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1666 struct inquiry_entry *e;
1667
6ed93dc6 1668 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1669
1670 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1671 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1672 return e;
1673 }
1674
1675 return NULL;
1da177e4
LT
1676}
1677
30dc78e1 1678struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1679 bdaddr_t *bdaddr,
1680 int state)
30dc78e1
JH
1681{
1682 struct discovery_state *cache = &hdev->discovery;
1683 struct inquiry_entry *e;
1684
6ed93dc6 1685 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1686
1687 list_for_each_entry(e, &cache->resolve, list) {
1688 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1689 return e;
1690 if (!bacmp(&e->data.bdaddr, bdaddr))
1691 return e;
1692 }
1693
1694 return NULL;
1695}
1696
/* Re-insert @ie into the resolve list so that entries stay ordered
 * by descending signal strength (smaller |RSSI| first), with entries
 * whose name resolution is already pending kept ahead.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added at its sorted position */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1715
/* Add or refresh an inquiry cache entry from incoming inquiry @data.
 * @name_known tells whether the remote name is already known; @ssp,
 * when non-NULL, is set to the (possibly cached) SSP mode.
 *
 * Returns true when no further name resolution is needed for this
 * entry, false when the name is still unknown (or allocation failed).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP once it has ever been seen */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the entry in the resolve list */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry once the name becomes known, unless a
	 * resolution request is already in flight.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
1774static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1775{
30883512 1776 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1777 struct inquiry_info *info = (struct inquiry_info *) buf;
1778 struct inquiry_entry *e;
1779 int copied = 0;
1780
561aafbc 1781 list_for_each_entry(e, &cache->all, all) {
1da177e4 1782 struct inquiry_data *data = &e->data;
b57c1a56
JH
1783
1784 if (copied >= num)
1785 break;
1786
1da177e4
LT
1787 bacpy(&info->bdaddr, &data->bdaddr);
1788 info->pscan_rep_mode = data->pscan_rep_mode;
1789 info->pscan_period_mode = data->pscan_period_mode;
1790 info->pscan_mode = data->pscan_mode;
1791 memcpy(info->dev_class, data->dev_class, 3);
1792 info->clock_offset = data->clock_offset;
b57c1a56 1793
1da177e4 1794 info++;
b57c1a56 1795 copied++;
1da177e4
LT
1796 }
1797
1798 BT_DBG("cache %p, copied %d", cache, copied);
1799 return copied;
1800}
1801
42c6b129 1802static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1803{
1804 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1805 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1806 struct hci_cp_inquiry cp;
1807
1808 BT_DBG("%s", hdev->name);
1809
1810 if (test_bit(HCI_INQUIRY, &hdev->flags))
1811 return;
1812
1813 /* Start Inquiry */
1814 memcpy(&cp.lap, &ir->lap, 3);
1815 cp.length = ir->length;
1816 cp.num_rsp = ir->num_rsp;
42c6b129 1817 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1818}
1819
/* wait_on_bit() action function: sleep until woken, then report
 * whether a signal arrived (non-zero aborts the bit wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
1da177e4
LT
1826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
5a08ecce
AE
1838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
1da177e4
LT
1840 return -ENODEV;
1841
0736cfa8
MH
1842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
5b69bef5
MH
1847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
56f87901
JH
1852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
09fd0de5 1857 hci_dev_lock(hdev);
8e87d142 1858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1860 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1861 do_inquiry = 1;
1862 }
09fd0de5 1863 hci_dev_unlock(hdev);
1da177e4 1864
04837f64 1865 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1866
1867 if (do_inquiry) {
01178cd4
JH
1868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
70f23020
AE
1870 if (err < 0)
1871 goto done;
3e13fa1e
AG
1872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
70f23020 1879 }
1da177e4 1880
8fc9ced3
GP
1881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
1da177e4
LT
1884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
01df8c31 1889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1890 if (!buf) {
1da177e4
LT
1891 err = -ENOMEM;
1892 goto done;
1893 }
1894
09fd0de5 1895 hci_dev_lock(hdev);
1da177e4 1896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1897 hci_dev_unlock(hdev);
1da177e4
LT
1898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1904 ir.num_rsp))
1da177e4 1905 err = -EFAULT;
8e87d142 1906 } else
1da177e4
LT
1907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Power on and initialize an HCI device. Runs with the request lock
 * held to serialize against concurrent open/close. Returns 0 on
 * success or a negative errno; on init failure the transport is
 * closed again and all queues are purged.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Bring up the transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Run the driver-specific setup hook once, during initial setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Run the HCI init sequence unless the device is in raw
		 * or user channel mode.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify the management interface, except during setup,
		 * in user channel mode, or for non-BR/EDR controllers.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP ioctl entry point: resolve the device index, wait for any
 * pending setup/power work, then open the device.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2054
/* Power down an HCI device: stop pending work, flush queues and
 * timers, optionally reset the controller, and close the transport.
 * Safe to call on an already-down device (returns 0 early).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device was not up: only stop the command timer */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop last received event */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Notify the management interface unless auto-off initiated
	 * the close; only BR/EDR controllers are mgmt-visible.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2152
2153int hci_dev_close(__u16 dev)
2154{
2155 struct hci_dev *hdev;
2156 int err;
2157
70f23020
AE
2158 hdev = hci_dev_get(dev);
2159 if (!hdev)
1da177e4 2160 return -ENODEV;
8ee56540 2161
0736cfa8
MH
2162 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163 err = -EBUSY;
2164 goto done;
2165 }
2166
8ee56540
MH
2167 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2168 cancel_delayed_work(&hdev->power_off);
2169
1da177e4 2170 err = hci_dev_do_close(hdev);
8ee56540 2171
0736cfa8 2172done:
1da177e4
LT
2173 hci_dev_put(hdev);
2174 return err;
2175}
2176
2177int hci_dev_reset(__u16 dev)
2178{
2179 struct hci_dev *hdev;
2180 int ret = 0;
2181
70f23020
AE
2182 hdev = hci_dev_get(dev);
2183 if (!hdev)
1da177e4
LT
2184 return -ENODEV;
2185
2186 hci_req_lock(hdev);
1da177e4 2187
808a049e
MH
2188 if (!test_bit(HCI_UP, &hdev->flags)) {
2189 ret = -ENETDOWN;
1da177e4 2190 goto done;
808a049e 2191 }
1da177e4 2192
0736cfa8
MH
2193 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2194 ret = -EBUSY;
2195 goto done;
2196 }
2197
1da177e4
LT
2198 /* Drop queues */
2199 skb_queue_purge(&hdev->rx_q);
2200 skb_queue_purge(&hdev->cmd_q);
2201
09fd0de5 2202 hci_dev_lock(hdev);
1f9b9a5d 2203 hci_inquiry_cache_flush(hdev);
1da177e4 2204 hci_conn_hash_flush(hdev);
09fd0de5 2205 hci_dev_unlock(hdev);
1da177e4
LT
2206
2207 if (hdev->flush)
2208 hdev->flush(hdev);
2209
8e87d142 2210 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2211 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
2212
2213 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 2214 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2215
2216done:
1da177e4
LT
2217 hci_req_unlock(hdev);
2218 hci_dev_put(hdev);
2219 return ret;
2220}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
70f23020
AE
2227 hdev = hci_dev_get(dev);
2228 if (!hdev)
1da177e4
LT
2229 return -ENODEV;
2230
0736cfa8
MH
2231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
1da177e4
LT
2236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
0736cfa8 2238done:
1da177e4 2239 hci_dev_put(hdev);
1da177e4
LT
2240 return ret;
2241}
2242
2243int hci_dev_cmd(unsigned int cmd, void __user *arg)
2244{
2245 struct hci_dev *hdev;
2246 struct hci_dev_req dr;
2247 int err = 0;
2248
2249 if (copy_from_user(&dr, arg, sizeof(dr)))
2250 return -EFAULT;
2251
70f23020
AE
2252 hdev = hci_dev_get(dr.dev_id);
2253 if (!hdev)
1da177e4
LT
2254 return -ENODEV;
2255
0736cfa8
MH
2256 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257 err = -EBUSY;
2258 goto done;
2259 }
2260
5b69bef5
MH
2261 if (hdev->dev_type != HCI_BREDR) {
2262 err = -EOPNOTSUPP;
2263 goto done;
2264 }
2265
56f87901
JH
2266 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2267 err = -EOPNOTSUPP;
2268 goto done;
2269 }
2270
1da177e4
LT
2271 switch (cmd) {
2272 case HCISETAUTH:
01178cd4
JH
2273 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2274 HCI_INIT_TIMEOUT);
1da177e4
LT
2275 break;
2276
2277 case HCISETENCRYPT:
2278 if (!lmp_encrypt_capable(hdev)) {
2279 err = -EOPNOTSUPP;
2280 break;
2281 }
2282
2283 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2284 /* Auth must be enabled first */
01178cd4
JH
2285 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2286 HCI_INIT_TIMEOUT);
1da177e4
LT
2287 if (err)
2288 break;
2289 }
2290
01178cd4
JH
2291 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2292 HCI_INIT_TIMEOUT);
1da177e4
LT
2293 break;
2294
2295 case HCISETSCAN:
01178cd4
JH
2296 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2297 HCI_INIT_TIMEOUT);
1da177e4
LT
2298 break;
2299
1da177e4 2300 case HCISETLINKPOL:
01178cd4
JH
2301 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2302 HCI_INIT_TIMEOUT);
1da177e4
LT
2303 break;
2304
2305 case HCISETLINKMODE:
e4e8e37c
MH
2306 hdev->link_mode = ((__u16) dr.dev_opt) &
2307 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2308 break;
2309
2310 case HCISETPTYPE:
2311 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2312 break;
2313
2314 case HCISETACLMTU:
e4e8e37c
MH
2315 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2316 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2317 break;
2318
2319 case HCISETSCOMTU:
e4e8e37c
MH
2320 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2321 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2322 break;
2323
2324 default:
2325 err = -EINVAL;
2326 break;
2327 }
e4e8e37c 2328
0736cfa8 2329done:
1da177e4
LT
2330 hci_dev_put(hdev);
2331 return err;
2332}
2333
2334int hci_get_dev_list(void __user *arg)
2335{
8035ded4 2336 struct hci_dev *hdev;
1da177e4
LT
2337 struct hci_dev_list_req *dl;
2338 struct hci_dev_req *dr;
1da177e4
LT
2339 int n = 0, size, err;
2340 __u16 dev_num;
2341
2342 if (get_user(dev_num, (__u16 __user *) arg))
2343 return -EFAULT;
2344
2345 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2346 return -EINVAL;
2347
2348 size = sizeof(*dl) + dev_num * sizeof(*dr);
2349
70f23020
AE
2350 dl = kzalloc(size, GFP_KERNEL);
2351 if (!dl)
1da177e4
LT
2352 return -ENOMEM;
2353
2354 dr = dl->dev_req;
2355
f20d09d5 2356 read_lock(&hci_dev_list_lock);
8035ded4 2357 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2358 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2359 cancel_delayed_work(&hdev->power_off);
c542a06c 2360
a8b2d5c2
JH
2361 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2362 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2363
1da177e4
LT
2364 (dr + n)->dev_id = hdev->id;
2365 (dr + n)->dev_opt = hdev->flags;
c542a06c 2366
1da177e4
LT
2367 if (++n >= dev_num)
2368 break;
2369 }
f20d09d5 2370 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2371
2372 dl->dev_num = n;
2373 size = sizeof(*dl) + n * sizeof(*dr);
2374
2375 err = copy_to_user(arg, dl, size);
2376 kfree(dl);
2377
2378 return err ? -EFAULT : 0;
2379}
2380
2381int hci_get_dev_info(void __user *arg)
2382{
2383 struct hci_dev *hdev;
2384 struct hci_dev_info di;
2385 int err = 0;
2386
2387 if (copy_from_user(&di, arg, sizeof(di)))
2388 return -EFAULT;
2389
70f23020
AE
2390 hdev = hci_dev_get(di.dev_id);
2391 if (!hdev)
1da177e4
LT
2392 return -ENODEV;
2393
a8b2d5c2 2394 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2395 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2396
a8b2d5c2
JH
2397 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2398 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2399
1da177e4
LT
2400 strcpy(di.name, hdev->name);
2401 di.bdaddr = hdev->bdaddr;
60f2a3ed 2402 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2403 di.flags = hdev->flags;
2404 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2405 if (lmp_bredr_capable(hdev)) {
2406 di.acl_mtu = hdev->acl_mtu;
2407 di.acl_pkts = hdev->acl_pkts;
2408 di.sco_mtu = hdev->sco_mtu;
2409 di.sco_pkts = hdev->sco_pkts;
2410 } else {
2411 di.acl_mtu = hdev->le_mtu;
2412 di.acl_pkts = hdev->le_pkts;
2413 di.sco_mtu = 0;
2414 di.sco_pkts = 0;
2415 }
1da177e4
LT
2416 di.link_policy = hdev->link_policy;
2417 di.link_mode = hdev->link_mode;
2418
2419 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2420 memcpy(&di.features, &hdev->features, sizeof(di.features));
2421
2422 if (copy_to_user(arg, &di, sizeof(di)))
2423 err = -EFAULT;
2424
2425 hci_dev_put(hdev);
2426
2427 return err;
2428}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
611b30f7
MH
2432static int hci_rfkill_set_block(void *data, bool blocked)
2433{
2434 struct hci_dev *hdev = data;
2435
2436 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2437
0736cfa8
MH
2438 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2439 return -EBUSY;
2440
5e130367
JH
2441 if (blocked) {
2442 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2443 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2444 hci_dev_do_close(hdev);
5e130367
JH
2445 } else {
2446 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2447 }
611b30f7
MH
2448
2449 return 0;
2450}
2451
2452static const struct rfkill_ops hci_rfkill_ops = {
2453 .set_block = hci_rfkill_set_block,
2454};
2455
ab81cbf9
JH
2456static void hci_power_on(struct work_struct *work)
2457{
2458 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2459 int err;
ab81cbf9
JH
2460
2461 BT_DBG("%s", hdev->name);
2462
cbed0ca1 2463 err = hci_dev_do_open(hdev);
96570ffc
JH
2464 if (err < 0) {
2465 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2466 return;
96570ffc 2467 }
ab81cbf9 2468
a5c8f270
MH
2469 /* During the HCI setup phase, a few error conditions are
2470 * ignored and they need to be checked now. If they are still
2471 * valid, it is important to turn the device back off.
2472 */
2473 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2474 (hdev->dev_type == HCI_BREDR &&
2475 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2476 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2477 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2478 hci_dev_do_close(hdev);
2479 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2480 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2481 HCI_AUTO_OFF_TIMEOUT);
bf543036 2482 }
ab81cbf9 2483
a8b2d5c2 2484 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 2485 mgmt_index_added(hdev);
ab81cbf9
JH
2486}
2487
2488static void hci_power_off(struct work_struct *work)
2489{
3243553f 2490 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2491 power_off.work);
ab81cbf9
JH
2492
2493 BT_DBG("%s", hdev->name);
2494
8ee56540 2495 hci_dev_do_close(hdev);
ab81cbf9
JH
2496}
2497
16ab91ab
JH
2498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
16ab91ab
JH
2501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
d1967ff8 2506 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2507}
2508
35f7498a 2509void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2510{
4821002c 2511 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2512
4821002c
JH
2513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
2aeb9a1a
JH
2515 kfree(uuid);
2516 }
2aeb9a1a
JH
2517}
2518
35f7498a 2519void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
55ed8ca1
JH
2531}
2532
35f7498a 2533void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2534{
2535 struct smp_ltk *k, *tmp;
2536
2537 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2538 list_del(&k->list);
2539 kfree(k);
2540 }
b899efaf
VCG
2541}
2542
970c4e46
JH
2543void hci_smp_irks_clear(struct hci_dev *hdev)
2544{
2545 struct smp_irk *k, *tmp;
2546
2547 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551}
2552
55ed8ca1
JH
2553struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2554{
8035ded4 2555 struct link_key *k;
55ed8ca1 2556
8035ded4 2557 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2558 if (bacmp(bdaddr, &k->bdaddr) == 0)
2559 return k;
55ed8ca1
JH
2560
2561 return NULL;
2562}
2563
745c0ce3 2564static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2565 u8 key_type, u8 old_key_type)
d25e28ab
JH
2566{
2567 /* Legacy key */
2568 if (key_type < 0x03)
745c0ce3 2569 return true;
d25e28ab
JH
2570
2571 /* Debug keys are insecure so don't store them persistently */
2572 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2573 return false;
d25e28ab
JH
2574
2575 /* Changed combination key and there's no previous one */
2576 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2577 return false;
d25e28ab
JH
2578
2579 /* Security mode 3 case */
2580 if (!conn)
745c0ce3 2581 return true;
d25e28ab
JH
2582
2583 /* Neither local nor remote side had no-bonding as requirement */
2584 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2585 return true;
d25e28ab
JH
2586
2587 /* Local side had dedicated bonding as requirement */
2588 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2589 return true;
d25e28ab
JH
2590
2591 /* Remote side had dedicated bonding as requirement */
2592 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2593 return true;
d25e28ab
JH
2594
2595 /* If none of the above criteria match, then don't store the key
2596 * persistently */
745c0ce3 2597 return false;
d25e28ab
JH
2598}
2599
98a0b845
JH
2600static bool ltk_type_master(u8 type)
2601{
2602 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2603 return true;
2604
2605 return false;
2606}
2607
2608struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2609 bool master)
75d262c2 2610{
c9839a11 2611 struct smp_ltk *k;
75d262c2 2612
c9839a11
VCG
2613 list_for_each_entry(k, &hdev->long_term_keys, list) {
2614 if (k->ediv != ediv ||
a8c5fb1a 2615 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2616 continue;
2617
98a0b845
JH
2618 if (ltk_type_master(k->type) != master)
2619 continue;
2620
c9839a11 2621 return k;
75d262c2
VCG
2622 }
2623
2624 return NULL;
2625}
75d262c2 2626
c9839a11 2627struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2628 u8 addr_type, bool master)
75d262c2 2629{
c9839a11 2630 struct smp_ltk *k;
75d262c2 2631
c9839a11
VCG
2632 list_for_each_entry(k, &hdev->long_term_keys, list)
2633 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2634 bacmp(bdaddr, &k->bdaddr) == 0 &&
2635 ltk_type_master(k->type) == master)
75d262c2
VCG
2636 return k;
2637
2638 return NULL;
2639}
75d262c2 2640
970c4e46
JH
2641struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2642{
2643 struct smp_irk *irk;
2644
2645 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2646 if (!bacmp(&irk->rpa, rpa))
2647 return irk;
2648 }
2649
2650 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2651 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2652 bacpy(&irk->rpa, rpa);
2653 return irk;
2654 }
2655 }
2656
2657 return NULL;
2658}
2659
2660struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2661 u8 addr_type)
2662{
2663 struct smp_irk *irk;
2664
2665 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2666 if (addr_type == irk->addr_type &&
2667 bacmp(bdaddr, &irk->bdaddr) == 0)
2668 return irk;
2669 }
2670
2671 return NULL;
2672}
2673
d25e28ab 2674int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2675 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2676{
2677 struct link_key *key, *old_key;
745c0ce3
VA
2678 u8 old_key_type;
2679 bool persistent;
55ed8ca1
JH
2680
2681 old_key = hci_find_link_key(hdev, bdaddr);
2682 if (old_key) {
2683 old_key_type = old_key->type;
2684 key = old_key;
2685 } else {
12adcf3a 2686 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2687 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2688 if (!key)
2689 return -ENOMEM;
2690 list_add(&key->list, &hdev->link_keys);
2691 }
2692
6ed93dc6 2693 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2694
d25e28ab
JH
2695 /* Some buggy controller combinations generate a changed
2696 * combination key for legacy pairing even when there's no
2697 * previous key */
2698 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2699 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2700 type = HCI_LK_COMBINATION;
655fe6ec
JH
2701 if (conn)
2702 conn->key_type = type;
2703 }
d25e28ab 2704
55ed8ca1 2705 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2706 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2707 key->pin_len = pin_len;
2708
b6020ba0 2709 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2710 key->type = old_key_type;
4748fed2
JH
2711 else
2712 key->type = type;
2713
4df378a1
JH
2714 if (!new_key)
2715 return 0;
2716
2717 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2718
744cf19e 2719 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2720
6ec5bcad
VA
2721 if (conn)
2722 conn->flush_key = !persistent;
55ed8ca1
JH
2723
2724 return 0;
2725}
2726
c9839a11 2727int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2728 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2729 ediv, u8 rand[8])
75d262c2 2730{
c9839a11 2731 struct smp_ltk *key, *old_key;
98a0b845 2732 bool master = ltk_type_master(type);
0fe442ff 2733 u8 persistent;
75d262c2 2734
98a0b845 2735 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2736 if (old_key)
75d262c2 2737 key = old_key;
c9839a11
VCG
2738 else {
2739 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2740 if (!key)
2741 return -ENOMEM;
c9839a11 2742 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2743 }
2744
75d262c2 2745 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2746 key->bdaddr_type = addr_type;
2747 memcpy(key->val, tk, sizeof(key->val));
2748 key->authenticated = authenticated;
2749 key->ediv = ediv;
2750 key->enc_size = enc_size;
2751 key->type = type;
2752 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2753
c9839a11
VCG
2754 if (!new_key)
2755 return 0;
75d262c2 2756
0fe442ff
MH
2757 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2758 persistent = 0;
2759 else
2760 persistent = 1;
2761
21b93b75 2762 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
0fe442ff 2763 mgmt_new_ltk(hdev, key, persistent);
261cc5aa 2764
75d262c2
VCG
2765 return 0;
2766}
2767
970c4e46
JH
2768int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2769 u8 val[16], bdaddr_t *rpa)
2770{
2771 struct smp_irk *irk;
2772
2773 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2774 if (!irk) {
2775 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2776 if (!irk)
2777 return -ENOMEM;
2778
2779 bacpy(&irk->bdaddr, bdaddr);
2780 irk->addr_type = addr_type;
2781
2782 list_add(&irk->list, &hdev->identity_resolving_keys);
2783 }
2784
2785 memcpy(irk->val, val, 16);
2786 bacpy(&irk->rpa, rpa);
2787
2788 return 0;
2789}
2790
55ed8ca1
JH
2791int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2792{
2793 struct link_key *key;
2794
2795 key = hci_find_link_key(hdev, bdaddr);
2796 if (!key)
2797 return -ENOENT;
2798
6ed93dc6 2799 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2800
2801 list_del(&key->list);
2802 kfree(key);
2803
2804 return 0;
2805}
2806
e0b2b27e 2807int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
2808{
2809 struct smp_ltk *k, *tmp;
c51ffa0b 2810 int removed = 0;
b899efaf
VCG
2811
2812 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 2813 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2814 continue;
2815
6ed93dc6 2816 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2817
2818 list_del(&k->list);
2819 kfree(k);
c51ffa0b 2820 removed++;
b899efaf
VCG
2821 }
2822
c51ffa0b 2823 return removed ? 0 : -ENOENT;
b899efaf
VCG
2824}
2825
a7ec7338
JH
2826void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2827{
2828 struct smp_irk *k, *tmp;
2829
2830 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2831 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2832 continue;
2833
2834 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2835
2836 list_del(&k->list);
2837 kfree(k);
2838 }
2839}
2840
6bd32326 2841/* HCI command timer function */
bda4f23a 2842static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2843{
2844 struct hci_dev *hdev = (void *) arg;
2845
bda4f23a
AE
2846 if (hdev->sent_cmd) {
2847 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2848 u16 opcode = __le16_to_cpu(sent->opcode);
2849
2850 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2851 } else {
2852 BT_ERR("%s command tx timeout", hdev->name);
2853 }
2854
6bd32326 2855 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2856 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2857}
2858
2763eda6 2859struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2860 bdaddr_t *bdaddr)
2763eda6
SJ
2861{
2862 struct oob_data *data;
2863
2864 list_for_each_entry(data, &hdev->remote_oob_data, list)
2865 if (bacmp(bdaddr, &data->bdaddr) == 0)
2866 return data;
2867
2868 return NULL;
2869}
2870
2871int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2872{
2873 struct oob_data *data;
2874
2875 data = hci_find_remote_oob_data(hdev, bdaddr);
2876 if (!data)
2877 return -ENOENT;
2878
6ed93dc6 2879 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2880
2881 list_del(&data->list);
2882 kfree(data);
2883
2884 return 0;
2885}
2886
35f7498a 2887void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2888{
2889 struct oob_data *data, *n;
2890
2891 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2892 list_del(&data->list);
2893 kfree(data);
2894 }
2763eda6
SJ
2895}
2896
0798872e
MH
2897int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2898 u8 *hash, u8 *randomizer)
2763eda6
SJ
2899{
2900 struct oob_data *data;
2901
2902 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 2903 if (!data) {
0798872e 2904 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2763eda6
SJ
2905 if (!data)
2906 return -ENOMEM;
2907
2908 bacpy(&data->bdaddr, bdaddr);
2909 list_add(&data->list, &hdev->remote_oob_data);
2910 }
2911
519ca9d0
MH
2912 memcpy(data->hash192, hash, sizeof(data->hash192));
2913 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 2914
0798872e
MH
2915 memset(data->hash256, 0, sizeof(data->hash256));
2916 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2917
2918 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2919
2920 return 0;
2921}
2922
2923int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2924 u8 *hash192, u8 *randomizer192,
2925 u8 *hash256, u8 *randomizer256)
2926{
2927 struct oob_data *data;
2928
2929 data = hci_find_remote_oob_data(hdev, bdaddr);
2930 if (!data) {
2931 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2932 if (!data)
2933 return -ENOMEM;
2934
2935 bacpy(&data->bdaddr, bdaddr);
2936 list_add(&data->list, &hdev->remote_oob_data);
2937 }
2938
2939 memcpy(data->hash192, hash192, sizeof(data->hash192));
2940 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2941
2942 memcpy(data->hash256, hash256, sizeof(data->hash256));
2943 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2944
6ed93dc6 2945 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2946
2947 return 0;
2948}
2949
b9ee0a78
MH
2950struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2951 bdaddr_t *bdaddr, u8 type)
b2a66aad 2952{
8035ded4 2953 struct bdaddr_list *b;
b2a66aad 2954
b9ee0a78
MH
2955 list_for_each_entry(b, &hdev->blacklist, list) {
2956 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2957 return b;
b9ee0a78 2958 }
b2a66aad
AJ
2959
2960 return NULL;
2961}
2962
35f7498a 2963void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
2964{
2965 struct list_head *p, *n;
2966
2967 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2968 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2969
2970 list_del(p);
2971 kfree(b);
2972 }
b2a66aad
AJ
2973}
2974
88c1fe4b 2975int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2976{
2977 struct bdaddr_list *entry;
b2a66aad 2978
b9ee0a78 2979 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2980 return -EBADF;
2981
b9ee0a78 2982 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2983 return -EEXIST;
b2a66aad
AJ
2984
2985 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2986 if (!entry)
2987 return -ENOMEM;
b2a66aad
AJ
2988
2989 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2990 entry->bdaddr_type = type;
b2a66aad
AJ
2991
2992 list_add(&entry->list, &hdev->blacklist);
2993
88c1fe4b 2994 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2995}
2996
88c1fe4b 2997int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2998{
2999 struct bdaddr_list *entry;
b2a66aad 3000
35f7498a
JH
3001 if (!bacmp(bdaddr, BDADDR_ANY)) {
3002 hci_blacklist_clear(hdev);
3003 return 0;
3004 }
b2a66aad 3005
b9ee0a78 3006 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3007 if (!entry)
5e762444 3008 return -ENOENT;
b2a66aad
AJ
3009
3010 list_del(&entry->list);
3011 kfree(entry);
3012
88c1fe4b 3013 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3014}
3015
15819a70
AG
3016/* This function requires the caller holds hdev->lock */
3017struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3018 bdaddr_t *addr, u8 addr_type)
3019{
3020 struct hci_conn_params *params;
3021
3022 list_for_each_entry(params, &hdev->le_conn_params, list) {
3023 if (bacmp(&params->addr, addr) == 0 &&
3024 params->addr_type == addr_type) {
3025 return params;
3026 }
3027 }
3028
3029 return NULL;
3030}
3031
3032/* This function requires the caller holds hdev->lock */
3033void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3034 u16 conn_min_interval, u16 conn_max_interval)
3035{
3036 struct hci_conn_params *params;
3037
3038 params = hci_conn_params_lookup(hdev, addr, addr_type);
3039 if (params) {
3040 params->conn_min_interval = conn_min_interval;
3041 params->conn_max_interval = conn_max_interval;
3042 return;
3043 }
3044
3045 params = kzalloc(sizeof(*params), GFP_KERNEL);
3046 if (!params) {
3047 BT_ERR("Out of memory");
3048 return;
3049 }
3050
3051 bacpy(&params->addr, addr);
3052 params->addr_type = addr_type;
3053 params->conn_min_interval = conn_min_interval;
3054 params->conn_max_interval = conn_max_interval;
3055
3056 list_add(&params->list, &hdev->le_conn_params);
3057
3058 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3059 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3060 conn_max_interval);
3061}
3062
3063/* This function requires the caller holds hdev->lock */
3064void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3065{
3066 struct hci_conn_params *params;
3067
3068 params = hci_conn_params_lookup(hdev, addr, addr_type);
3069 if (!params)
3070 return;
3071
3072 list_del(&params->list);
3073 kfree(params);
3074
3075 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3076}
3077
3078/* This function requires the caller holds hdev->lock */
3079void hci_conn_params_clear(struct hci_dev *hdev)
3080{
3081 struct hci_conn_params *params, *tmp;
3082
3083 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3084 list_del(&params->list);
3085 kfree(params);
3086 }
3087
3088 BT_DBG("All LE connection parameters were removed");
3089}
3090
4c87eaab 3091static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3092{
4c87eaab
AG
3093 if (status) {
3094 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3095
4c87eaab
AG
3096 hci_dev_lock(hdev);
3097 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3098 hci_dev_unlock(hdev);
3099 return;
3100 }
7ba8b4be
AG
3101}
3102
4c87eaab 3103static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3104{
4c87eaab
AG
3105 /* General inquiry access code (GIAC) */
3106 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3107 struct hci_request req;
3108 struct hci_cp_inquiry cp;
7ba8b4be
AG
3109 int err;
3110
4c87eaab
AG
3111 if (status) {
3112 BT_ERR("Failed to disable LE scanning: status %d", status);
3113 return;
3114 }
7ba8b4be 3115
4c87eaab
AG
3116 switch (hdev->discovery.type) {
3117 case DISCOV_TYPE_LE:
3118 hci_dev_lock(hdev);
3119 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3120 hci_dev_unlock(hdev);
3121 break;
7ba8b4be 3122
4c87eaab
AG
3123 case DISCOV_TYPE_INTERLEAVED:
3124 hci_req_init(&req, hdev);
7ba8b4be 3125
4c87eaab
AG
3126 memset(&cp, 0, sizeof(cp));
3127 memcpy(&cp.lap, lap, sizeof(cp.lap));
3128 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3129 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3130
4c87eaab 3131 hci_dev_lock(hdev);
7dbfac1d 3132
4c87eaab 3133 hci_inquiry_cache_flush(hdev);
7dbfac1d 3134
4c87eaab
AG
3135 err = hci_req_run(&req, inquiry_complete);
3136 if (err) {
3137 BT_ERR("Inquiry request failed: err %d", err);
3138 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3139 }
7dbfac1d 3140
4c87eaab
AG
3141 hci_dev_unlock(hdev);
3142 break;
7dbfac1d 3143 }
7dbfac1d
AG
3144}
3145
7ba8b4be
AG
3146static void le_scan_disable_work(struct work_struct *work)
3147{
3148 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3149 le_scan_disable.work);
7ba8b4be 3150 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
3151 struct hci_request req;
3152 int err;
7ba8b4be
AG
3153
3154 BT_DBG("%s", hdev->name);
3155
4c87eaab 3156 hci_req_init(&req, hdev);
28b75a89 3157
7ba8b4be 3158 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
3159 cp.enable = LE_SCAN_DISABLE;
3160 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 3161
4c87eaab
AG
3162 err = hci_req_run(&req, le_scan_disable_work_complete);
3163 if (err)
3164 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3165}
3166
9be0dab7
DH
3167/* Alloc HCI device */
3168struct hci_dev *hci_alloc_dev(void)
3169{
3170 struct hci_dev *hdev;
3171
3172 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3173 if (!hdev)
3174 return NULL;
3175
b1b813d4
DH
3176 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3177 hdev->esco_type = (ESCO_HV1);
3178 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3179 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3180 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3181 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3182 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3183
b1b813d4
DH
3184 hdev->sniff_max_interval = 800;
3185 hdev->sniff_min_interval = 80;
3186
bef64738
MH
3187 hdev->le_scan_interval = 0x0060;
3188 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3189 hdev->le_conn_min_interval = 0x0028;
3190 hdev->le_conn_max_interval = 0x0038;
bef64738 3191
b1b813d4
DH
3192 mutex_init(&hdev->lock);
3193 mutex_init(&hdev->req_lock);
3194
3195 INIT_LIST_HEAD(&hdev->mgmt_pending);
3196 INIT_LIST_HEAD(&hdev->blacklist);
3197 INIT_LIST_HEAD(&hdev->uuids);
3198 INIT_LIST_HEAD(&hdev->link_keys);
3199 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3200 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3201 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3202 INIT_LIST_HEAD(&hdev->le_conn_params);
6b536b5e 3203 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3204
3205 INIT_WORK(&hdev->rx_work, hci_rx_work);
3206 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3207 INIT_WORK(&hdev->tx_work, hci_tx_work);
3208 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3209
b1b813d4
DH
3210 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3211 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3212 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3213
b1b813d4
DH
3214 skb_queue_head_init(&hdev->rx_q);
3215 skb_queue_head_init(&hdev->cmd_q);
3216 skb_queue_head_init(&hdev->raw_q);
3217
3218 init_waitqueue_head(&hdev->req_wait_q);
3219
bda4f23a 3220 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3221
b1b813d4
DH
3222 hci_init_sysfs(hdev);
3223 discovery_init(hdev);
9be0dab7
DH
3224
3225 return hdev;
3226}
3227EXPORT_SYMBOL(hci_alloc_dev);
3228
3229/* Free HCI device */
3230void hci_free_dev(struct hci_dev *hdev)
3231{
9be0dab7
DH
3232 /* will free via device release */
3233 put_device(&hdev->dev);
3234}
3235EXPORT_SYMBOL(hci_free_dev);
3236
1da177e4
LT
3237/* Register HCI device */
3238int hci_register_dev(struct hci_dev *hdev)
3239{
b1b813d4 3240 int id, error;
1da177e4 3241
010666a1 3242 if (!hdev->open || !hdev->close)
1da177e4
LT
3243 return -EINVAL;
3244
08add513
MM
3245 /* Do not allow HCI_AMP devices to register at index 0,
3246 * so the index can be used as the AMP controller ID.
3247 */
3df92b31
SL
3248 switch (hdev->dev_type) {
3249 case HCI_BREDR:
3250 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3251 break;
3252 case HCI_AMP:
3253 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3254 break;
3255 default:
3256 return -EINVAL;
1da177e4 3257 }
8e87d142 3258
3df92b31
SL
3259 if (id < 0)
3260 return id;
3261
1da177e4
LT
3262 sprintf(hdev->name, "hci%d", id);
3263 hdev->id = id;
2d8b3a11
AE
3264
3265 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3266
d8537548
KC
3267 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3268 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3269 if (!hdev->workqueue) {
3270 error = -ENOMEM;
3271 goto err;
3272 }
f48fd9c8 3273
d8537548
KC
3274 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3275 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3276 if (!hdev->req_workqueue) {
3277 destroy_workqueue(hdev->workqueue);
3278 error = -ENOMEM;
3279 goto err;
3280 }
3281
0153e2ec
MH
3282 if (!IS_ERR_OR_NULL(bt_debugfs))
3283 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3284
bdc3e0f1
MH
3285 dev_set_name(&hdev->dev, "%s", hdev->name);
3286
99780a7b
JH
3287 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3288 CRYPTO_ALG_ASYNC);
3289 if (IS_ERR(hdev->tfm_aes)) {
3290 BT_ERR("Unable to create crypto context");
3291 error = PTR_ERR(hdev->tfm_aes);
3292 hdev->tfm_aes = NULL;
3293 goto err_wqueue;
3294 }
3295
bdc3e0f1 3296 error = device_add(&hdev->dev);
33ca954d 3297 if (error < 0)
99780a7b 3298 goto err_tfm;
1da177e4 3299
611b30f7 3300 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3301 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3302 hdev);
611b30f7
MH
3303 if (hdev->rfkill) {
3304 if (rfkill_register(hdev->rfkill) < 0) {
3305 rfkill_destroy(hdev->rfkill);
3306 hdev->rfkill = NULL;
3307 }
3308 }
3309
5e130367
JH
3310 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3311 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3312
a8b2d5c2 3313 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3314 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3315
01cd3404 3316 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3317 /* Assume BR/EDR support until proven otherwise (such as
3318 * through reading supported features during init.
3319 */
3320 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3321 }
ce2be9ac 3322
fcee3377
GP
3323 write_lock(&hci_dev_list_lock);
3324 list_add(&hdev->list, &hci_dev_list);
3325 write_unlock(&hci_dev_list_lock);
3326
1da177e4 3327 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3328 hci_dev_hold(hdev);
1da177e4 3329
19202573 3330 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3331
1da177e4 3332 return id;
f48fd9c8 3333
99780a7b
JH
3334err_tfm:
3335 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3336err_wqueue:
3337 destroy_workqueue(hdev->workqueue);
6ead1bbc 3338 destroy_workqueue(hdev->req_workqueue);
33ca954d 3339err:
3df92b31 3340 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3341
33ca954d 3342 return error;
1da177e4
LT
3343}
3344EXPORT_SYMBOL(hci_register_dev);
3345
3346/* Unregister HCI device */
59735631 3347void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3348{
3df92b31 3349 int i, id;
ef222013 3350
c13854ce 3351 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3352
94324962
JH
3353 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3354
3df92b31
SL
3355 id = hdev->id;
3356
f20d09d5 3357 write_lock(&hci_dev_list_lock);
1da177e4 3358 list_del(&hdev->list);
f20d09d5 3359 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3360
3361 hci_dev_do_close(hdev);
3362
cd4c5391 3363 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3364 kfree_skb(hdev->reassembly[i]);
3365
b9b5ef18
GP
3366 cancel_work_sync(&hdev->power_on);
3367
ab81cbf9 3368 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3369 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3370 hci_dev_lock(hdev);
744cf19e 3371 mgmt_index_removed(hdev);
09fd0de5 3372 hci_dev_unlock(hdev);
56e5cb86 3373 }
ab81cbf9 3374
2e58ef3e
JH
3375 /* mgmt_index_removed should take care of emptying the
3376 * pending list */
3377 BUG_ON(!list_empty(&hdev->mgmt_pending));
3378
1da177e4
LT
3379 hci_notify(hdev, HCI_DEV_UNREG);
3380
611b30f7
MH
3381 if (hdev->rfkill) {
3382 rfkill_unregister(hdev->rfkill);
3383 rfkill_destroy(hdev->rfkill);
3384 }
3385
99780a7b
JH
3386 if (hdev->tfm_aes)
3387 crypto_free_blkcipher(hdev->tfm_aes);
3388
bdc3e0f1 3389 device_del(&hdev->dev);
147e2d59 3390
0153e2ec
MH
3391 debugfs_remove_recursive(hdev->debugfs);
3392
f48fd9c8 3393 destroy_workqueue(hdev->workqueue);
6ead1bbc 3394 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3395
09fd0de5 3396 hci_dev_lock(hdev);
e2e0cacb 3397 hci_blacklist_clear(hdev);
2aeb9a1a 3398 hci_uuids_clear(hdev);
55ed8ca1 3399 hci_link_keys_clear(hdev);
b899efaf 3400 hci_smp_ltks_clear(hdev);
970c4e46 3401 hci_smp_irks_clear(hdev);
2763eda6 3402 hci_remote_oob_data_clear(hdev);
15819a70 3403 hci_conn_params_clear(hdev);
09fd0de5 3404 hci_dev_unlock(hdev);
e2e0cacb 3405
dc946bd8 3406 hci_dev_put(hdev);
3df92b31
SL
3407
3408 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3409}
3410EXPORT_SYMBOL(hci_unregister_dev);
3411
3412/* Suspend HCI device */
3413int hci_suspend_dev(struct hci_dev *hdev)
3414{
3415 hci_notify(hdev, HCI_DEV_SUSPEND);
3416 return 0;
3417}
3418EXPORT_SYMBOL(hci_suspend_dev);
3419
3420/* Resume HCI device */
3421int hci_resume_dev(struct hci_dev *hdev)
3422{
3423 hci_notify(hdev, HCI_DEV_RESUME);
3424 return 0;
3425}
3426EXPORT_SYMBOL(hci_resume_dev);
3427
76bca880 3428/* Receive frame from HCI drivers */
e1a26170 3429int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3430{
76bca880 3431 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3432 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3433 kfree_skb(skb);
3434 return -ENXIO;
3435 }
3436
d82603c6 3437 /* Incoming skb */
76bca880
MH
3438 bt_cb(skb)->incoming = 1;
3439
3440 /* Time stamp */
3441 __net_timestamp(skb);
3442
76bca880 3443 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3444 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3445
76bca880
MH
3446 return 0;
3447}
3448EXPORT_SYMBOL(hci_recv_frame);
3449
33e882a5 3450static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3451 int count, __u8 index)
33e882a5
SS
3452{
3453 int len = 0;
3454 int hlen = 0;
3455 int remain = count;
3456 struct sk_buff *skb;
3457 struct bt_skb_cb *scb;
3458
3459 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3460 index >= NUM_REASSEMBLY)
33e882a5
SS
3461 return -EILSEQ;
3462
3463 skb = hdev->reassembly[index];
3464
3465 if (!skb) {
3466 switch (type) {
3467 case HCI_ACLDATA_PKT:
3468 len = HCI_MAX_FRAME_SIZE;
3469 hlen = HCI_ACL_HDR_SIZE;
3470 break;
3471 case HCI_EVENT_PKT:
3472 len = HCI_MAX_EVENT_SIZE;
3473 hlen = HCI_EVENT_HDR_SIZE;
3474 break;
3475 case HCI_SCODATA_PKT:
3476 len = HCI_MAX_SCO_SIZE;
3477 hlen = HCI_SCO_HDR_SIZE;
3478 break;
3479 }
3480
1e429f38 3481 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3482 if (!skb)
3483 return -ENOMEM;
3484
3485 scb = (void *) skb->cb;
3486 scb->expect = hlen;
3487 scb->pkt_type = type;
3488
33e882a5
SS
3489 hdev->reassembly[index] = skb;
3490 }
3491
3492 while (count) {
3493 scb = (void *) skb->cb;
89bb46d0 3494 len = min_t(uint, scb->expect, count);
33e882a5
SS
3495
3496 memcpy(skb_put(skb, len), data, len);
3497
3498 count -= len;
3499 data += len;
3500 scb->expect -= len;
3501 remain = count;
3502
3503 switch (type) {
3504 case HCI_EVENT_PKT:
3505 if (skb->len == HCI_EVENT_HDR_SIZE) {
3506 struct hci_event_hdr *h = hci_event_hdr(skb);
3507 scb->expect = h->plen;
3508
3509 if (skb_tailroom(skb) < scb->expect) {
3510 kfree_skb(skb);
3511 hdev->reassembly[index] = NULL;
3512 return -ENOMEM;
3513 }
3514 }
3515 break;
3516
3517 case HCI_ACLDATA_PKT:
3518 if (skb->len == HCI_ACL_HDR_SIZE) {
3519 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3520 scb->expect = __le16_to_cpu(h->dlen);
3521
3522 if (skb_tailroom(skb) < scb->expect) {
3523 kfree_skb(skb);
3524 hdev->reassembly[index] = NULL;
3525 return -ENOMEM;
3526 }
3527 }
3528 break;
3529
3530 case HCI_SCODATA_PKT:
3531 if (skb->len == HCI_SCO_HDR_SIZE) {
3532 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3533 scb->expect = h->dlen;
3534
3535 if (skb_tailroom(skb) < scb->expect) {
3536 kfree_skb(skb);
3537 hdev->reassembly[index] = NULL;
3538 return -ENOMEM;
3539 }
3540 }
3541 break;
3542 }
3543
3544 if (scb->expect == 0) {
3545 /* Complete frame */
3546
3547 bt_cb(skb)->pkt_type = type;
e1a26170 3548 hci_recv_frame(hdev, skb);
33e882a5
SS
3549
3550 hdev->reassembly[index] = NULL;
3551 return remain;
3552 }
3553 }
3554
3555 return remain;
3556}
3557
ef222013
MH
3558int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3559{
f39a3c06
SS
3560 int rem = 0;
3561
ef222013
MH
3562 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3563 return -EILSEQ;
3564
da5f6c37 3565 while (count) {
1e429f38 3566 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3567 if (rem < 0)
3568 return rem;
ef222013 3569
f39a3c06
SS
3570 data += (count - rem);
3571 count = rem;
f81c6224 3572 }
ef222013 3573
f39a3c06 3574 return rem;
ef222013
MH
3575}
3576EXPORT_SYMBOL(hci_recv_fragment);
3577
99811510
SS
3578#define STREAM_REASSEMBLY 0
3579
3580int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3581{
3582 int type;
3583 int rem = 0;
3584
da5f6c37 3585 while (count) {
99811510
SS
3586 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3587
3588 if (!skb) {
3589 struct { char type; } *pkt;
3590
3591 /* Start of the frame */
3592 pkt = data;
3593 type = pkt->type;
3594
3595 data++;
3596 count--;
3597 } else
3598 type = bt_cb(skb)->pkt_type;
3599
1e429f38 3600 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3601 STREAM_REASSEMBLY);
99811510
SS
3602 if (rem < 0)
3603 return rem;
3604
3605 data += (count - rem);
3606 count = rem;
f81c6224 3607 }
99811510
SS
3608
3609 return rem;
3610}
3611EXPORT_SYMBOL(hci_recv_stream_fragment);
3612
1da177e4
LT
3613/* ---- Interface to upper protocols ---- */
3614
1da177e4
LT
3615int hci_register_cb(struct hci_cb *cb)
3616{
3617 BT_DBG("%p name %s", cb, cb->name);
3618
f20d09d5 3619 write_lock(&hci_cb_list_lock);
1da177e4 3620 list_add(&cb->list, &hci_cb_list);
f20d09d5 3621 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3622
3623 return 0;
3624}
3625EXPORT_SYMBOL(hci_register_cb);
3626
3627int hci_unregister_cb(struct hci_cb *cb)
3628{
3629 BT_DBG("%p name %s", cb, cb->name);
3630
f20d09d5 3631 write_lock(&hci_cb_list_lock);
1da177e4 3632 list_del(&cb->list);
f20d09d5 3633 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3634
3635 return 0;
3636}
3637EXPORT_SYMBOL(hci_unregister_cb);
3638
51086991 3639static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3640{
0d48d939 3641 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3642
cd82e61c
MH
3643 /* Time stamp */
3644 __net_timestamp(skb);
1da177e4 3645
cd82e61c
MH
3646 /* Send copy to monitor */
3647 hci_send_to_monitor(hdev, skb);
3648
3649 if (atomic_read(&hdev->promisc)) {
3650 /* Send copy to the sockets */
470fe1b5 3651 hci_send_to_sock(hdev, skb);
1da177e4
LT
3652 }
3653
3654 /* Get rid of skb owner, prior to sending to the driver. */
3655 skb_orphan(skb);
3656
7bd8f09f 3657 if (hdev->send(hdev, skb) < 0)
51086991 3658 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
3659}
3660
3119ae95
JH
3661void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3662{
3663 skb_queue_head_init(&req->cmd_q);
3664 req->hdev = hdev;
5d73e034 3665 req->err = 0;
3119ae95
JH
3666}
3667
3668int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3669{
3670 struct hci_dev *hdev = req->hdev;
3671 struct sk_buff *skb;
3672 unsigned long flags;
3673
3674 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3675
5d73e034
AG
3676 /* If an error occured during request building, remove all HCI
3677 * commands queued on the HCI request queue.
3678 */
3679 if (req->err) {
3680 skb_queue_purge(&req->cmd_q);
3681 return req->err;
3682 }
3683
3119ae95
JH
3684 /* Do not allow empty requests */
3685 if (skb_queue_empty(&req->cmd_q))
382b0c39 3686 return -ENODATA;
3119ae95
JH
3687
3688 skb = skb_peek_tail(&req->cmd_q);
3689 bt_cb(skb)->req.complete = complete;
3690
3691 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3692 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3693 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3694
3695 queue_work(hdev->workqueue, &hdev->cmd_work);
3696
3697 return 0;
3698}
3699
1ca3a9d0 3700static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3701 u32 plen, const void *param)
1da177e4
LT
3702{
3703 int len = HCI_COMMAND_HDR_SIZE + plen;
3704 struct hci_command_hdr *hdr;
3705 struct sk_buff *skb;
3706
1da177e4 3707 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3708 if (!skb)
3709 return NULL;
1da177e4
LT
3710
3711 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3712 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3713 hdr->plen = plen;
3714
3715 if (plen)
3716 memcpy(skb_put(skb, plen), param, plen);
3717
3718 BT_DBG("skb len %d", skb->len);
3719
0d48d939 3720 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3721
1ca3a9d0
JH
3722 return skb;
3723}
3724
3725/* Send HCI command */
07dc93dd
JH
3726int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3727 const void *param)
1ca3a9d0
JH
3728{
3729 struct sk_buff *skb;
3730
3731 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3732
3733 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3734 if (!skb) {
3735 BT_ERR("%s no memory for command", hdev->name);
3736 return -ENOMEM;
3737 }
3738
11714b3d
JH
3739 /* Stand-alone HCI commands must be flaged as
3740 * single-command requests.
3741 */
3742 bt_cb(skb)->req.start = true;
3743
1da177e4 3744 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3745 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3746
3747 return 0;
3748}
1da177e4 3749
71c76a17 3750/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3751void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3752 const void *param, u8 event)
71c76a17
JH
3753{
3754 struct hci_dev *hdev = req->hdev;
3755 struct sk_buff *skb;
3756
3757 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3758
34739c1e
AG
3759 /* If an error occured during request building, there is no point in
3760 * queueing the HCI command. We can simply return.
3761 */
3762 if (req->err)
3763 return;
3764
71c76a17
JH
3765 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3766 if (!skb) {
5d73e034
AG
3767 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3768 hdev->name, opcode);
3769 req->err = -ENOMEM;
e348fe6b 3770 return;
71c76a17
JH
3771 }
3772
3773 if (skb_queue_empty(&req->cmd_q))
3774 bt_cb(skb)->req.start = true;
3775
02350a72
JH
3776 bt_cb(skb)->req.event = event;
3777
71c76a17 3778 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3779}
3780
07dc93dd
JH
3781void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3782 const void *param)
02350a72
JH
3783{
3784 hci_req_add_ev(req, opcode, plen, param, 0);
3785}
3786
1da177e4 3787/* Get data from the previously sent command */
a9de9248 3788void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3789{
3790 struct hci_command_hdr *hdr;
3791
3792 if (!hdev->sent_cmd)
3793 return NULL;
3794
3795 hdr = (void *) hdev->sent_cmd->data;
3796
a9de9248 3797 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3798 return NULL;
3799
f0e09510 3800 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3801
3802 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3803}
3804
3805/* Send ACL data */
3806static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3807{
3808 struct hci_acl_hdr *hdr;
3809 int len = skb->len;
3810
badff6d0
ACM
3811 skb_push(skb, HCI_ACL_HDR_SIZE);
3812 skb_reset_transport_header(skb);
9c70220b 3813 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3814 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3815 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3816}
3817
ee22be7e 3818static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3819 struct sk_buff *skb, __u16 flags)
1da177e4 3820{
ee22be7e 3821 struct hci_conn *conn = chan->conn;
1da177e4
LT
3822 struct hci_dev *hdev = conn->hdev;
3823 struct sk_buff *list;
3824
087bfd99
GP
3825 skb->len = skb_headlen(skb);
3826 skb->data_len = 0;
3827
3828 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3829
3830 switch (hdev->dev_type) {
3831 case HCI_BREDR:
3832 hci_add_acl_hdr(skb, conn->handle, flags);
3833 break;
3834 case HCI_AMP:
3835 hci_add_acl_hdr(skb, chan->handle, flags);
3836 break;
3837 default:
3838 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3839 return;
3840 }
087bfd99 3841
70f23020
AE
3842 list = skb_shinfo(skb)->frag_list;
3843 if (!list) {
1da177e4
LT
3844 /* Non fragmented */
3845 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3846
73d80deb 3847 skb_queue_tail(queue, skb);
1da177e4
LT
3848 } else {
3849 /* Fragmented */
3850 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3851
3852 skb_shinfo(skb)->frag_list = NULL;
3853
3854 /* Queue all fragments atomically */
af3e6359 3855 spin_lock(&queue->lock);
1da177e4 3856
73d80deb 3857 __skb_queue_tail(queue, skb);
e702112f
AE
3858
3859 flags &= ~ACL_START;
3860 flags |= ACL_CONT;
1da177e4
LT
3861 do {
3862 skb = list; list = list->next;
8e87d142 3863
0d48d939 3864 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3865 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3866
3867 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3868
73d80deb 3869 __skb_queue_tail(queue, skb);
1da177e4
LT
3870 } while (list);
3871
af3e6359 3872 spin_unlock(&queue->lock);
1da177e4 3873 }
73d80deb
LAD
3874}
3875
3876void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3877{
ee22be7e 3878 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3879
f0e09510 3880 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3881
ee22be7e 3882 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3883
3eff45ea 3884 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3885}
1da177e4
LT
3886
3887/* Send SCO data */
0d861d8b 3888void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3889{
3890 struct hci_dev *hdev = conn->hdev;
3891 struct hci_sco_hdr hdr;
3892
3893 BT_DBG("%s len %d", hdev->name, skb->len);
3894
aca3192c 3895 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3896 hdr.dlen = skb->len;
3897
badff6d0
ACM
3898 skb_push(skb, HCI_SCO_HDR_SIZE);
3899 skb_reset_transport_header(skb);
9c70220b 3900 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3901
0d48d939 3902 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3903
1da177e4 3904 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3905 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3906}
1da177e4
LT
3907
3908/* ---- HCI TX task (outgoing data) ---- */
3909
3910/* HCI Connection scheduler */
6039aa73
GP
3911static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3912 int *quote)
1da177e4
LT
3913{
3914 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3915 struct hci_conn *conn = NULL, *c;
abc5de8f 3916 unsigned int num = 0, min = ~0;
1da177e4 3917
8e87d142 3918 /* We don't have to lock device here. Connections are always
1da177e4 3919 * added and removed with TX task disabled. */
bf4c6325
GP
3920
3921 rcu_read_lock();
3922
3923 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3924 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3925 continue;
769be974
MH
3926
3927 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3928 continue;
3929
1da177e4
LT
3930 num++;
3931
3932 if (c->sent < min) {
3933 min = c->sent;
3934 conn = c;
3935 }
52087a79
LAD
3936
3937 if (hci_conn_num(hdev, type) == num)
3938 break;
1da177e4
LT
3939 }
3940
bf4c6325
GP
3941 rcu_read_unlock();
3942
1da177e4 3943 if (conn) {
6ed58ec5
VT
3944 int cnt, q;
3945
3946 switch (conn->type) {
3947 case ACL_LINK:
3948 cnt = hdev->acl_cnt;
3949 break;
3950 case SCO_LINK:
3951 case ESCO_LINK:
3952 cnt = hdev->sco_cnt;
3953 break;
3954 case LE_LINK:
3955 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3956 break;
3957 default:
3958 cnt = 0;
3959 BT_ERR("Unknown link type");
3960 }
3961
3962 q = cnt / num;
1da177e4
LT
3963 *quote = q ? q : 1;
3964 } else
3965 *quote = 0;
3966
3967 BT_DBG("conn %p quote %d", conn, *quote);
3968 return conn;
3969}
3970
6039aa73 3971static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3972{
3973 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3974 struct hci_conn *c;
1da177e4 3975
bae1f5d9 3976 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3977
bf4c6325
GP
3978 rcu_read_lock();
3979
1da177e4 3980 /* Kill stalled connections */
bf4c6325 3981 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3982 if (c->type == type && c->sent) {
6ed93dc6
AE
3983 BT_ERR("%s killing stalled connection %pMR",
3984 hdev->name, &c->dst);
bed71748 3985 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3986 }
3987 }
bf4c6325
GP
3988
3989 rcu_read_unlock();
1da177e4
LT
3990}
3991
6039aa73
GP
3992static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3993 int *quote)
1da177e4 3994{
73d80deb
LAD
3995 struct hci_conn_hash *h = &hdev->conn_hash;
3996 struct hci_chan *chan = NULL;
abc5de8f 3997 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3998 struct hci_conn *conn;
73d80deb
LAD
3999 int cnt, q, conn_num = 0;
4000
4001 BT_DBG("%s", hdev->name);
4002
bf4c6325
GP
4003 rcu_read_lock();
4004
4005 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4006 struct hci_chan *tmp;
4007
4008 if (conn->type != type)
4009 continue;
4010
4011 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4012 continue;
4013
4014 conn_num++;
4015
8192edef 4016 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4017 struct sk_buff *skb;
4018
4019 if (skb_queue_empty(&tmp->data_q))
4020 continue;
4021
4022 skb = skb_peek(&tmp->data_q);
4023 if (skb->priority < cur_prio)
4024 continue;
4025
4026 if (skb->priority > cur_prio) {
4027 num = 0;
4028 min = ~0;
4029 cur_prio = skb->priority;
4030 }
4031
4032 num++;
4033
4034 if (conn->sent < min) {
4035 min = conn->sent;
4036 chan = tmp;
4037 }
4038 }
4039
4040 if (hci_conn_num(hdev, type) == conn_num)
4041 break;
4042 }
4043
bf4c6325
GP
4044 rcu_read_unlock();
4045
73d80deb
LAD
4046 if (!chan)
4047 return NULL;
4048
4049 switch (chan->conn->type) {
4050 case ACL_LINK:
4051 cnt = hdev->acl_cnt;
4052 break;
bd1eb66b
AE
4053 case AMP_LINK:
4054 cnt = hdev->block_cnt;
4055 break;
73d80deb
LAD
4056 case SCO_LINK:
4057 case ESCO_LINK:
4058 cnt = hdev->sco_cnt;
4059 break;
4060 case LE_LINK:
4061 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4062 break;
4063 default:
4064 cnt = 0;
4065 BT_ERR("Unknown link type");
4066 }
4067
4068 q = cnt / num;
4069 *quote = q ? q : 1;
4070 BT_DBG("chan %p quote %d", chan, *quote);
4071 return chan;
4072}
4073
02b20f0b
LAD
4074static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4075{
4076 struct hci_conn_hash *h = &hdev->conn_hash;
4077 struct hci_conn *conn;
4078 int num = 0;
4079
4080 BT_DBG("%s", hdev->name);
4081
bf4c6325
GP
4082 rcu_read_lock();
4083
4084 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4085 struct hci_chan *chan;
4086
4087 if (conn->type != type)
4088 continue;
4089
4090 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4091 continue;
4092
4093 num++;
4094
8192edef 4095 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4096 struct sk_buff *skb;
4097
4098 if (chan->sent) {
4099 chan->sent = 0;
4100 continue;
4101 }
4102
4103 if (skb_queue_empty(&chan->data_q))
4104 continue;
4105
4106 skb = skb_peek(&chan->data_q);
4107 if (skb->priority >= HCI_PRIO_MAX - 1)
4108 continue;
4109
4110 skb->priority = HCI_PRIO_MAX - 1;
4111
4112 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4113 skb->priority);
02b20f0b
LAD
4114 }
4115
4116 if (hci_conn_num(hdev, type) == num)
4117 break;
4118 }
bf4c6325
GP
4119
4120 rcu_read_unlock();
4121
02b20f0b
LAD
4122}
4123
b71d385a
AE
4124static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4125{
4126 /* Calculate count of blocks used by this packet */
4127 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4128}
4129
6039aa73 4130static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4131{
1da177e4
LT
4132 if (!test_bit(HCI_RAW, &hdev->flags)) {
4133 /* ACL tx timeout must be longer than maximum
4134 * link supervision timeout (40.9 seconds) */
63d2bc1b 4135 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4136 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4137 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4138 }
63d2bc1b 4139}
1da177e4 4140
6039aa73 4141static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4142{
4143 unsigned int cnt = hdev->acl_cnt;
4144 struct hci_chan *chan;
4145 struct sk_buff *skb;
4146 int quote;
4147
4148 __check_timeout(hdev, cnt);
04837f64 4149
73d80deb 4150 while (hdev->acl_cnt &&
a8c5fb1a 4151 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4152 u32 priority = (skb_peek(&chan->data_q))->priority;
4153 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4154 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4155 skb->len, skb->priority);
73d80deb 4156
ec1cce24
LAD
4157 /* Stop if priority has changed */
4158 if (skb->priority < priority)
4159 break;
4160
4161 skb = skb_dequeue(&chan->data_q);
4162
73d80deb 4163 hci_conn_enter_active_mode(chan->conn,
04124681 4164 bt_cb(skb)->force_active);
04837f64 4165
57d17d70 4166 hci_send_frame(hdev, skb);
1da177e4
LT
4167 hdev->acl_last_tx = jiffies;
4168
4169 hdev->acl_cnt--;
73d80deb
LAD
4170 chan->sent++;
4171 chan->conn->sent++;
1da177e4
LT
4172 }
4173 }
02b20f0b
LAD
4174
4175 if (cnt != hdev->acl_cnt)
4176 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4177}
4178
6039aa73 4179static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4180{
63d2bc1b 4181 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4182 struct hci_chan *chan;
4183 struct sk_buff *skb;
4184 int quote;
bd1eb66b 4185 u8 type;
b71d385a 4186
63d2bc1b 4187 __check_timeout(hdev, cnt);
b71d385a 4188
bd1eb66b
AE
4189 BT_DBG("%s", hdev->name);
4190
4191 if (hdev->dev_type == HCI_AMP)
4192 type = AMP_LINK;
4193 else
4194 type = ACL_LINK;
4195
b71d385a 4196 while (hdev->block_cnt > 0 &&
bd1eb66b 4197 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4198 u32 priority = (skb_peek(&chan->data_q))->priority;
4199 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4200 int blocks;
4201
4202 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4203 skb->len, skb->priority);
b71d385a
AE
4204
4205 /* Stop if priority has changed */
4206 if (skb->priority < priority)
4207 break;
4208
4209 skb = skb_dequeue(&chan->data_q);
4210
4211 blocks = __get_blocks(hdev, skb);
4212 if (blocks > hdev->block_cnt)
4213 return;
4214
4215 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4216 bt_cb(skb)->force_active);
b71d385a 4217
57d17d70 4218 hci_send_frame(hdev, skb);
b71d385a
AE
4219 hdev->acl_last_tx = jiffies;
4220
4221 hdev->block_cnt -= blocks;
4222 quote -= blocks;
4223
4224 chan->sent += blocks;
4225 chan->conn->sent += blocks;
4226 }
4227 }
4228
4229 if (cnt != hdev->block_cnt)
bd1eb66b 4230 hci_prio_recalculate(hdev, type);
b71d385a
AE
4231}
4232
6039aa73 4233static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4234{
4235 BT_DBG("%s", hdev->name);
4236
bd1eb66b
AE
4237 /* No ACL link over BR/EDR controller */
4238 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4239 return;
4240
4241 /* No AMP link over AMP controller */
4242 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4243 return;
4244
4245 switch (hdev->flow_ctl_mode) {
4246 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4247 hci_sched_acl_pkt(hdev);
4248 break;
4249
4250 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4251 hci_sched_acl_blk(hdev);
4252 break;
4253 }
4254}
4255
1da177e4 4256/* Schedule SCO */
6039aa73 4257static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4258{
4259 struct hci_conn *conn;
4260 struct sk_buff *skb;
4261 int quote;
4262
4263 BT_DBG("%s", hdev->name);
4264
52087a79
LAD
4265 if (!hci_conn_num(hdev, SCO_LINK))
4266 return;
4267
1da177e4
LT
4268 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4269 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4270 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4271 hci_send_frame(hdev, skb);
1da177e4
LT
4272
4273 conn->sent++;
4274 if (conn->sent == ~0)
4275 conn->sent = 0;
4276 }
4277 }
4278}
4279
6039aa73 4280static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4281{
4282 struct hci_conn *conn;
4283 struct sk_buff *skb;
4284 int quote;
4285
4286 BT_DBG("%s", hdev->name);
4287
52087a79
LAD
4288 if (!hci_conn_num(hdev, ESCO_LINK))
4289 return;
4290
8fc9ced3
GP
4291 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4292 &quote))) {
b6a0dc82
MH
4293 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4294 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4295 hci_send_frame(hdev, skb);
b6a0dc82
MH
4296
4297 conn->sent++;
4298 if (conn->sent == ~0)
4299 conn->sent = 0;
4300 }
4301 }
4302}
4303
6039aa73 4304static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4305{
73d80deb 4306 struct hci_chan *chan;
6ed58ec5 4307 struct sk_buff *skb;
02b20f0b 4308 int quote, cnt, tmp;
6ed58ec5
VT
4309
4310 BT_DBG("%s", hdev->name);
4311
52087a79
LAD
4312 if (!hci_conn_num(hdev, LE_LINK))
4313 return;
4314
6ed58ec5
VT
4315 if (!test_bit(HCI_RAW, &hdev->flags)) {
4316 /* LE tx timeout must be longer than maximum
4317 * link supervision timeout (40.9 seconds) */
bae1f5d9 4318 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4319 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4320 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4321 }
4322
4323 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4324 tmp = cnt;
73d80deb 4325 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4326 u32 priority = (skb_peek(&chan->data_q))->priority;
4327 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4328 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4329 skb->len, skb->priority);
6ed58ec5 4330
ec1cce24
LAD
4331 /* Stop if priority has changed */
4332 if (skb->priority < priority)
4333 break;
4334
4335 skb = skb_dequeue(&chan->data_q);
4336
57d17d70 4337 hci_send_frame(hdev, skb);
6ed58ec5
VT
4338 hdev->le_last_tx = jiffies;
4339
4340 cnt--;
73d80deb
LAD
4341 chan->sent++;
4342 chan->conn->sent++;
6ed58ec5
VT
4343 }
4344 }
73d80deb 4345
6ed58ec5
VT
4346 if (hdev->le_pkts)
4347 hdev->le_cnt = cnt;
4348 else
4349 hdev->acl_cnt = cnt;
02b20f0b
LAD
4350
4351 if (cnt != tmp)
4352 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4353}
4354
3eff45ea 4355static void hci_tx_work(struct work_struct *work)
1da177e4 4356{
3eff45ea 4357 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4358 struct sk_buff *skb;
4359
6ed58ec5 4360 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4361 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4362
52de599e
MH
4363 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4364 /* Schedule queues and send stuff to HCI driver */
4365 hci_sched_acl(hdev);
4366 hci_sched_sco(hdev);
4367 hci_sched_esco(hdev);
4368 hci_sched_le(hdev);
4369 }
6ed58ec5 4370
1da177e4
LT
4371 /* Send next queued raw (unknown type) packet */
4372 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4373 hci_send_frame(hdev, skb);
1da177e4
LT
4374}
4375
25985edc 4376/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4377
4378/* ACL data packet */
6039aa73 4379static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4380{
4381 struct hci_acl_hdr *hdr = (void *) skb->data;
4382 struct hci_conn *conn;
4383 __u16 handle, flags;
4384
4385 skb_pull(skb, HCI_ACL_HDR_SIZE);
4386
4387 handle = __le16_to_cpu(hdr->handle);
4388 flags = hci_flags(handle);
4389 handle = hci_handle(handle);
4390
f0e09510 4391 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4392 handle, flags);
1da177e4
LT
4393
4394 hdev->stat.acl_rx++;
4395
4396 hci_dev_lock(hdev);
4397 conn = hci_conn_hash_lookup_handle(hdev, handle);
4398 hci_dev_unlock(hdev);
8e87d142 4399
1da177e4 4400 if (conn) {
65983fc7 4401 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4402
1da177e4 4403 /* Send to upper protocol */
686ebf28
UF
4404 l2cap_recv_acldata(conn, skb, flags);
4405 return;
1da177e4 4406 } else {
8e87d142 4407 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4408 hdev->name, handle);
1da177e4
LT
4409 }
4410
4411 kfree_skb(skb);
4412}
4413
4414/* SCO data packet */
6039aa73 4415static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4416{
4417 struct hci_sco_hdr *hdr = (void *) skb->data;
4418 struct hci_conn *conn;
4419 __u16 handle;
4420
4421 skb_pull(skb, HCI_SCO_HDR_SIZE);
4422
4423 handle = __le16_to_cpu(hdr->handle);
4424
f0e09510 4425 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4426
4427 hdev->stat.sco_rx++;
4428
4429 hci_dev_lock(hdev);
4430 conn = hci_conn_hash_lookup_handle(hdev, handle);
4431 hci_dev_unlock(hdev);
4432
4433 if (conn) {
1da177e4 4434 /* Send to upper protocol */
686ebf28
UF
4435 sco_recv_scodata(conn, skb);
4436 return;
1da177e4 4437 } else {
8e87d142 4438 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4439 hdev->name, handle);
1da177e4
LT
4440 }
4441
4442 kfree_skb(skb);
4443}
4444
9238f36a
JH
4445static bool hci_req_is_complete(struct hci_dev *hdev)
4446{
4447 struct sk_buff *skb;
4448
4449 skb = skb_peek(&hdev->cmd_q);
4450 if (!skb)
4451 return true;
4452
4453 return bt_cb(skb)->req.start;
4454}
4455
42c6b129
JH
4456static void hci_resend_last(struct hci_dev *hdev)
4457{
4458 struct hci_command_hdr *sent;
4459 struct sk_buff *skb;
4460 u16 opcode;
4461
4462 if (!hdev->sent_cmd)
4463 return;
4464
4465 sent = (void *) hdev->sent_cmd->data;
4466 opcode = __le16_to_cpu(sent->opcode);
4467 if (opcode == HCI_OP_RESET)
4468 return;
4469
4470 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4471 if (!skb)
4472 return;
4473
4474 skb_queue_head(&hdev->cmd_q, skb);
4475 queue_work(hdev->workqueue, &hdev->cmd_work);
4476}
4477
/* Called on command-complete/status events: decide whether the HCI
 * request that the completed command belongs to is finished, and if so
 * invoke its completion callback exactly once.
 *
 * @hdev:   device the event arrived on
 * @opcode: opcode of the command that completed
 * @status: HCI status of the completed command (0 = success)
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	/* Hold cmd_q.lock while walking the queue so the command
	 * worker cannot dequeue concurrently; stop at the first
	 * command marked as the start of the next request and put it
	 * back at the head.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	/* Invoke the callback outside the queue lock */
	if (req_complete)
		req_complete(hdev, status);
}
4543
b78752cc 4544static void hci_rx_work(struct work_struct *work)
1da177e4 4545{
b78752cc 4546 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4547 struct sk_buff *skb;
4548
4549 BT_DBG("%s", hdev->name);
4550
1da177e4 4551 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4552 /* Send copy to monitor */
4553 hci_send_to_monitor(hdev, skb);
4554
1da177e4
LT
4555 if (atomic_read(&hdev->promisc)) {
4556 /* Send copy to the sockets */
470fe1b5 4557 hci_send_to_sock(hdev, skb);
1da177e4
LT
4558 }
4559
0736cfa8
MH
4560 if (test_bit(HCI_RAW, &hdev->flags) ||
4561 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4562 kfree_skb(skb);
4563 continue;
4564 }
4565
4566 if (test_bit(HCI_INIT, &hdev->flags)) {
4567 /* Don't process data packets in this states. */
0d48d939 4568 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4569 case HCI_ACLDATA_PKT:
4570 case HCI_SCODATA_PKT:
4571 kfree_skb(skb);
4572 continue;
3ff50b79 4573 }
1da177e4
LT
4574 }
4575
4576 /* Process frame */
0d48d939 4577 switch (bt_cb(skb)->pkt_type) {
1da177e4 4578 case HCI_EVENT_PKT:
b78752cc 4579 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4580 hci_event_packet(hdev, skb);
4581 break;
4582
4583 case HCI_ACLDATA_PKT:
4584 BT_DBG("%s ACL data packet", hdev->name);
4585 hci_acldata_packet(hdev, skb);
4586 break;
4587
4588 case HCI_SCODATA_PKT:
4589 BT_DBG("%s SCO data packet", hdev->name);
4590 hci_scodata_packet(hdev, skb);
4591 break;
4592
4593 default:
4594 kfree_skb(skb);
4595 break;
4596 }
4597 }
1da177e4
LT
4598}
4599
c347b765 4600static void hci_cmd_work(struct work_struct *work)
1da177e4 4601{
c347b765 4602 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4603 struct sk_buff *skb;
4604
2104786b
AE
4605 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4606 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4607
1da177e4 4608 /* Send queued commands */
5a08ecce
AE
4609 if (atomic_read(&hdev->cmd_cnt)) {
4610 skb = skb_dequeue(&hdev->cmd_q);
4611 if (!skb)
4612 return;
4613
7585b97a 4614 kfree_skb(hdev->sent_cmd);
1da177e4 4615
a675d7f1 4616 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4617 if (hdev->sent_cmd) {
1da177e4 4618 atomic_dec(&hdev->cmd_cnt);
57d17d70 4619 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
4620 if (test_bit(HCI_RESET, &hdev->flags))
4621 del_timer(&hdev->cmd_timer);
4622 else
4623 mod_timer(&hdev->cmd_timer,
5f246e89 4624 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
4625 } else {
4626 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4627 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4628 }
4629 }
4630}