]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Fix properly ignoring unexpected SMP PDUs
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
970c4e46
JH
38#include "smp.h"
39
b78752cc 40static void hci_rx_work(struct work_struct *work);
c347b765 41static void hci_cmd_work(struct work_struct *work);
3eff45ea 42static void hci_tx_work(struct work_struct *work);
1da177e4 43
1da177e4
LT
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
3df92b31
SL
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
1da177e4
LT
55/* ---- HCI notifications ---- */
56
/* Forward a device-level event (register/unregister, up/down, etc.) to
 * the HCI socket layer so listening sockets get notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
baf27f6e
MH
62/* ---- HCI debugfs entries ---- */
63
4b4148e9
MH
64static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66{
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74}
75
/* debugfs write handler for "dut_mode".  Parses a boolean from user
 * space and enables Device Under Test mode via HCI_Enable_DUT_Mode, or
 * leaves it again by issuing HCI_Reset.  Returns the number of bytes
 * consumed or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* DUT mode can only be toggled while the controller is powered up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Requested state already set; nothing to do */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete response is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
121
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
128
dfb826a8
MH
129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
cfbb2b5b
MH
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
70afe0b8
MH
167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
47219839
MH
192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
199 u8 i, val[16];
200
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
207
208 seq_printf(f, "%pUb\n", val);
47219839
MH
209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
/* seq_file open helper and file operations for the "uuids" entry */
static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
226
baf27f6e
MH
227static int inquiry_cache_show(struct seq_file *f, void *p)
228{
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249}
250
251static int inquiry_cache_open(struct inode *inode, struct file *file)
252{
253 return single_open(file, inquiry_cache_show, inode->i_private);
254}
255
256static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261};
262
02d08d15
MH
263static int link_keys_show(struct seq_file *f, void *ptr)
264{
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277}
278
279static int link_keys_open(struct inode *inode, struct file *file)
280{
281 return single_open(file, link_keys_show, inode->i_private);
282}
283
284static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289};
290
babdbb3c
MH
291static int dev_class_show(struct seq_file *f, void *ptr)
292{
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301}
302
303static int dev_class_open(struct inode *inode, struct file *file)
304{
305 return single_open(file, dev_class_show, inode->i_private);
306}
307
308static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313};
314
041000b9
MH
315static int voice_setting_get(void *data, u64 *val)
316{
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324}
325
326DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
ebd1e33b
MH
329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
06f5b778
MH
354static int ssp_debug_mode_set(void *data, u64 val)
355{
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387}
388
389static int ssp_debug_mode_get(void *data, u64 *val)
390{
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398}
399
400DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
5afeac14
MH
403static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405{
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413}
414
415static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418{
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440}
441
442static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447};
448
134c2a89
MH
449static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451{
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459}
460
461static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465};
466
2bfa3531
MH
467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
2be48b65 475 hdev->idle_timeout = val;
2bfa3531
MH
476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
/* Setter for the minimum sniff interval.  The value must be non-zero,
 * even, and must not exceed the current maximum interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

/* Setter for the maximum sniff interval.  The value must be non-zero,
 * even, and at least as large as the current minimum interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
e7b8fc92
MH
551static int static_address_show(struct seq_file *f, void *p)
552{
553 struct hci_dev *hdev = f->private;
554
555 hci_dev_lock(hdev);
556 seq_printf(f, "%pMR\n", &hdev->static_addr);
557 hci_dev_unlock(hdev);
558
559 return 0;
560}
561
562static int static_address_open(struct inode *inode, struct file *file)
563{
564 return single_open(file, static_address_show, inode->i_private);
565}
566
567static const struct file_operations static_address_fops = {
568 .open = static_address_open,
569 .read = seq_read,
570 .llseek = seq_lseek,
571 .release = single_release,
572};
573
92202185
MH
574static int own_address_type_set(void *data, u64 val)
575{
576 struct hci_dev *hdev = data;
577
578 if (val != 0 && val != 1)
579 return -EINVAL;
580
581 hci_dev_lock(hdev);
582 hdev->own_addr_type = val;
583 hci_dev_unlock(hdev);
584
585 return 0;
586}
587
588static int own_address_type_get(void *data, u64 *val)
589{
590 struct hci_dev *hdev = data;
591
592 hci_dev_lock(hdev);
593 *val = hdev->own_addr_type;
594 hci_dev_unlock(hdev);
595
596 return 0;
597}
598
599DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
600 own_address_type_set, "%llu\n");
601
8f8625cd
MH
602static int long_term_keys_show(struct seq_file *f, void *ptr)
603{
604 struct hci_dev *hdev = f->private;
605 struct list_head *p, *n;
606
607 hci_dev_lock(hdev);
f813f1be 608 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 609 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
f813f1be 610 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
8f8625cd
MH
611 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
612 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
613 8, ltk->rand, 16, ltk->val);
614 }
615 hci_dev_unlock(hdev);
616
617 return 0;
618}
619
620static int long_term_keys_open(struct inode *inode, struct file *file)
621{
622 return single_open(file, long_term_keys_show, inode->i_private);
623}
624
625static const struct file_operations long_term_keys_fops = {
626 .open = long_term_keys_open,
627 .read = seq_read,
628 .llseek = seq_lseek,
629 .release = single_release,
630};
631
4e70c7e7
MH
632static int conn_min_interval_set(void *data, u64 val)
633{
634 struct hci_dev *hdev = data;
635
636 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
637 return -EINVAL;
638
639 hci_dev_lock(hdev);
2be48b65 640 hdev->le_conn_min_interval = val;
4e70c7e7
MH
641 hci_dev_unlock(hdev);
642
643 return 0;
644}
645
646static int conn_min_interval_get(void *data, u64 *val)
647{
648 struct hci_dev *hdev = data;
649
650 hci_dev_lock(hdev);
651 *val = hdev->le_conn_min_interval;
652 hci_dev_unlock(hdev);
653
654 return 0;
655}
656
657DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
658 conn_min_interval_set, "%llu\n");
659
660static int conn_max_interval_set(void *data, u64 val)
661{
662 struct hci_dev *hdev = data;
663
664 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
665 return -EINVAL;
666
667 hci_dev_lock(hdev);
2be48b65 668 hdev->le_conn_max_interval = val;
4e70c7e7
MH
669 hci_dev_unlock(hdev);
670
671 return 0;
672}
673
674static int conn_max_interval_get(void *data, u64 *val)
675{
676 struct hci_dev *hdev = data;
677
678 hci_dev_lock(hdev);
679 *val = hdev->le_conn_max_interval;
680 hci_dev_unlock(hdev);
681
682 return 0;
683}
684
685DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
686 conn_max_interval_set, "%llu\n");
687
89863109
JR
688static ssize_t lowpan_read(struct file *file, char __user *user_buf,
689 size_t count, loff_t *ppos)
690{
691 struct hci_dev *hdev = file->private_data;
692 char buf[3];
693
694 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
695 buf[1] = '\n';
696 buf[2] = '\0';
697 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
698}
699
700static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
701 size_t count, loff_t *position)
702{
703 struct hci_dev *hdev = fp->private_data;
704 bool enable;
705 char buf[32];
706 size_t buf_size = min(count, (sizeof(buf)-1));
707
708 if (copy_from_user(buf, user_buffer, buf_size))
709 return -EFAULT;
710
711 buf[buf_size] = '\0';
712
713 if (strtobool(buf, &enable) < 0)
714 return -EINVAL;
715
716 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
717 return -EALREADY;
718
719 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
720
721 return count;
722}
723
724static const struct file_operations lowpan_debugfs_fops = {
725 .open = simple_open,
726 .read = lowpan_read,
727 .write = lowpan_write,
728 .llseek = default_llseek,
729};
730
1da177e4
LT
731/* ---- HCI requests ---- */
732
/* Completion callback for synchronous HCI requests: store the result
 * code and wake up the thread sleeping in __hci_req_sync().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with the given error code and
 * wake up the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
754
77a63e0a
FW
755static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
756 u8 event)
75e84b7c
JH
757{
758 struct hci_ev_cmd_complete *ev;
759 struct hci_event_hdr *hdr;
760 struct sk_buff *skb;
761
762 hci_dev_lock(hdev);
763
764 skb = hdev->recv_evt;
765 hdev->recv_evt = NULL;
766
767 hci_dev_unlock(hdev);
768
769 if (!skb)
770 return ERR_PTR(-ENODATA);
771
772 if (skb->len < sizeof(*hdr)) {
773 BT_ERR("Too short HCI event");
774 goto failed;
775 }
776
777 hdr = (void *) skb->data;
778 skb_pull(skb, HCI_EVENT_HDR_SIZE);
779
7b1abbbe
JH
780 if (event) {
781 if (hdr->evt != event)
782 goto failed;
783 return skb;
784 }
785
75e84b7c
JH
786 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
787 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
788 goto failed;
789 }
790
791 if (skb->len < sizeof(*ev)) {
792 BT_ERR("Too short cmd_complete event");
793 goto failed;
794 }
795
796 ev = (void *) skb->data;
797 skb_pull(skb, sizeof(*ev));
798
799 if (opcode == __le16_to_cpu(ev->opcode))
800 return skb;
801
802 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
803 __le16_to_cpu(ev->opcode));
804
805failed:
806 kfree_skb(skb);
807 return ERR_PTR(-ENODATA);
808}
809
/* Send a single HCI command and block until the matching response
 * arrives, the timeout expires, or a signal is received.  @event
 * selects the expected completion event (0 means Command Complete for
 * @opcode).  Returns the response skb (caller must free) or an
 * ERR_PTR.  Must be called with hci_req_lock held.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Must be on the wait queue before sleeping so a completion
	 * between the status check and schedule_timeout() is not lost.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* Convenience wrapper for __hci_cmd_sync_ev() expecting the default
 * Command Complete event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
870
/* Execute request and wait for completion. */
/* Build a request via @func, run it, and sleep until the request
 * completes, is cancelled, or @timeout expires.  Returns 0 on success
 * or a negative errno.  Caller must hold hci_req_lock.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands onto the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Queue ourselves before sleeping so the wakeup from the
	 * completion callback cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

/* Locked wrapper around __hci_req_sync(): serializes all synchronous
 * requests on this controller and refuses to run while it is down.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
952
/* Request builder that queues an HCI_Reset and marks the device as
 * resetting.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
961
/* First-stage init for BR/EDR controllers: select packet-based flow
 * control and read the basic controller identity information.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
975
/* First-stage init for AMP controllers: select block-based flow
 * control and read the AMP-specific controller information.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1001
/* Stage-1 controller init: optionally reset the device, then dispatch
 * to the BR/EDR or AMP specific init sequence based on device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1026
42c6b129 1027static void bredr_setup(struct hci_request *req)
2177bab5 1028{
4ca048e3
MH
1029 struct hci_dev *hdev = req->hdev;
1030
2177bab5
JH
1031 __le16 param;
1032 __u8 flt_type;
1033
1034 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1035 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1036
1037 /* Read Class of Device */
42c6b129 1038 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1039
1040 /* Read Local Name */
42c6b129 1041 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1042
1043 /* Read Voice Setting */
42c6b129 1044 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1045
b4cb9fb2
MH
1046 /* Read Number of Supported IAC */
1047 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1048
4b836f39
MH
1049 /* Read Current IAC LAP */
1050 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1051
2177bab5
JH
1052 /* Clear Event Filters */
1053 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1054 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1055
1056 /* Connection accept timeout ~20 secs */
1057 param = __constant_cpu_to_le16(0x7d00);
42c6b129 1058 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1059
4ca048e3
MH
1060 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1061 * but it does not support page scan related HCI commands.
1062 */
1063 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1064 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1065 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1066 }
2177bab5
JH
1067}
1068
/* Stage-2 init for LE capable controllers: read the LE buffer sizes,
 * feature/state masks and white list size, and implicitly enable LE
 * on single-mode controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1092
1093static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1094{
1095 if (lmp_ext_inq_capable(hdev))
1096 return 0x02;
1097
1098 if (lmp_inq_rssi_capable(hdev))
1099 return 0x01;
1100
1101 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1102 hdev->lmp_subver == 0x0757)
1103 return 0x01;
1104
1105 if (hdev->manufacturer == 15) {
1106 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1107 return 0x01;
1108 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1109 return 0x01;
1110 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1111 return 0x01;
1112 }
1113
1114 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1115 hdev->lmp_subver == 0x1805)
1116 return 0x01;
1117
1118 return 0x00;
1119}
1120
/* Queue a Write Inquiry Mode command using the mode selected by
 * hci_get_inquiry_mode().
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1129
/* Queue Set Event Mask (and, for LE capable controllers, LE Set Event
 * Mask) commands so the controller only generates events this host is
 * prepared to handle.  Bits are enabled per the LMP features read
 * during earlier init stages.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	/* Feature-conditional events */
	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* LE event mask: low five event bits enabled */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1210
/* Second stage of controller initialization: configure BR/EDR and/or
 * LE basics, the event mask, and optional features based on what the
 * first stage learned about the controller.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1274
42c6b129 1275static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1276{
42c6b129 1277 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1278 struct hci_cp_write_def_link_policy cp;
1279 u16 link_policy = 0;
1280
1281 if (lmp_rswitch_capable(hdev))
1282 link_policy |= HCI_LP_RSWITCH;
1283 if (lmp_hold_capable(hdev))
1284 link_policy |= HCI_LP_HOLD;
1285 if (lmp_sniff_capable(hdev))
1286 link_policy |= HCI_LP_SNIFF;
1287 if (lmp_park_capable(hdev))
1288 link_policy |= HCI_LP_PARK;
1289
1290 cp.policy = cpu_to_le16(link_policy);
42c6b129 1291 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1292}
1293
/* Synchronize the controller's "LE Supported (Host)" setting with the
 * HCI_LE_ENABLED flag by queueing a Write LE Host Supported command
 * when the two disagree.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if it would change the host setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1314
/* Queue a Set Event Mask Page 2 command enabling the Connectionless
 * Slave Broadcast and authenticated-payload-timeout events that this
 * controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1346
/* Third stage of controller initialization: commands gated on the
 * supported-commands bitmask and feature pages read in earlier stages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Only configure the default link policy if the controller
	 * lists the Write Default Link Policy command as supported.
	 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1403
/* Fourth stage of controller initialization: page 2 event mask,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1425
/* Run the staged controller initialization (init1..init4) and, during
 * the initial setup phase only, create the device's debugfs entries.
 *
 * Returns 0 on success or a negative errno from any failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic controller information */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Simple Pairing / Secure Connections entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1527
42c6b129 1528static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1529{
1530 __u8 scan = opt;
1531
42c6b129 1532 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1533
1534 /* Inquiry and Page scans */
42c6b129 1535 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1536}
1537
42c6b129 1538static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1539{
1540 __u8 auth = opt;
1541
42c6b129 1542 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1543
1544 /* Authentication */
42c6b129 1545 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1546}
1547
42c6b129 1548static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1549{
1550 __u8 encrypt = opt;
1551
42c6b129 1552 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1553
e4e8e37c 1554 /* Encryption */
42c6b129 1555 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1556}
1557
42c6b129 1558static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1559{
1560 __le16 policy = cpu_to_le16(opt);
1561
42c6b129 1562 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1563
1564 /* Default link policy */
42c6b129 1565 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1566}
1567
8e87d142 1568/* Get HCI device by index.
1da177e4
LT
1569 * Device is held on return. */
1570struct hci_dev *hci_dev_get(int index)
1571{
8035ded4 1572 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1573
1574 BT_DBG("%d", index);
1575
1576 if (index < 0)
1577 return NULL;
1578
1579 read_lock(&hci_dev_list_lock);
8035ded4 1580 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1581 if (d->id == index) {
1582 hdev = hci_dev_hold(d);
1583 break;
1584 }
1585 }
1586 read_unlock(&hci_dev_list_lock);
1587 return hdev;
1588}
1da177e4
LT
1589
1590/* ---- Inquiry support ---- */
ff9ef578 1591
30dc78e1
JH
1592bool hci_discovery_active(struct hci_dev *hdev)
1593{
1594 struct discovery_state *discov = &hdev->discovery;
1595
6fbe195d 1596 switch (discov->state) {
343f935b 1597 case DISCOVERY_FINDING:
6fbe195d 1598 case DISCOVERY_RESOLVING:
30dc78e1
JH
1599 return true;
1600
6fbe195d
AG
1601 default:
1602 return false;
1603 }
30dc78e1
JH
1604}
1605
/* Transition the discovery state machine to @state and emit mgmt
 * "Discovering" events on the transitions user space cares about.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* A STARTING -> STOPPED transition means discovery
		 * never actually ran, so don't signal it stopping.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1631
1f9b9a5d 1632void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1633{
30883512 1634 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1635 struct inquiry_entry *p, *n;
1da177e4 1636
561aafbc
JH
1637 list_for_each_entry_safe(p, n, &cache->all, all) {
1638 list_del(&p->all);
b57c1a56 1639 kfree(p);
1da177e4 1640 }
561aafbc
JH
1641
1642 INIT_LIST_HEAD(&cache->unknown);
1643 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1644}
1645
/* Find the inquiry cache entry for @bdaddr on the list of all
 * discovered devices.  Returns the entry or NULL if not cached.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1661
/* Find the cache entry for @bdaddr on the list of devices whose remote
 * name is still unknown.  Returns the entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1677
/* Find an entry on the name-resolve list.  A @bdaddr of BDADDR_ANY
 * matches the first entry in name state @state; a concrete address
 * matches regardless of state.  Returns the entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1696
/* Re-insert @ie into the name-resolve list keeping it sorted by
 * ascending |RSSI| (strongest signal first), so the closest devices
 * get their names resolved first.  Entries whose resolution is already
 * pending are not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1715
/* Add or refresh the inquiry cache entry for the device described by
 * @data.
 *
 * @name_known: whether the caller already knows the remote name
 * @ssp:        optional out parameter, set to true when the device
 *              (new or cached) reports Simple Pairing mode
 *
 * Returns true when the result can be reported to user space (name
 * known or not pending resolution), false otherwise (including
 * allocation failure for a new entry).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed: re-sort the entry in the resolve list */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the secondary
	 * (unknown/resolve) list it was linked on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1773
/* Copy at most @num cached inquiry results into @buf in the
 * inquiry_info wire format.  Returns the number of entries copied.
 * Does not sleep, so callers can hold the device lock.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1801
/* HCI request callback: start an inquiry with the parameters passed in
 * @opt (a pointer to struct hci_inquiry_req).  No-op while an inquiry
 * is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1819
/* Bit-wait callback for wait_on_bit(): yield the CPU until woken and
 * report whether a pending signal should abort the wait.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1825
1da177e4
LT
1826int hci_inquiry(void __user *arg)
1827{
1828 __u8 __user *ptr = arg;
1829 struct hci_inquiry_req ir;
1830 struct hci_dev *hdev;
1831 int err = 0, do_inquiry = 0, max_rsp;
1832 long timeo;
1833 __u8 *buf;
1834
1835 if (copy_from_user(&ir, ptr, sizeof(ir)))
1836 return -EFAULT;
1837
5a08ecce
AE
1838 hdev = hci_dev_get(ir.dev_id);
1839 if (!hdev)
1da177e4
LT
1840 return -ENODEV;
1841
0736cfa8
MH
1842 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1843 err = -EBUSY;
1844 goto done;
1845 }
1846
5b69bef5
MH
1847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
56f87901
JH
1852 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
09fd0de5 1857 hci_dev_lock(hdev);
8e87d142 1858 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1859 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1860 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1861 do_inquiry = 1;
1862 }
09fd0de5 1863 hci_dev_unlock(hdev);
1da177e4 1864
04837f64 1865 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1866
1867 if (do_inquiry) {
01178cd4
JH
1868 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1869 timeo);
70f23020
AE
1870 if (err < 0)
1871 goto done;
3e13fa1e
AG
1872
1873 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1874 * cleared). If it is interrupted by a signal, return -EINTR.
1875 */
1876 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1877 TASK_INTERRUPTIBLE))
1878 return -EINTR;
70f23020 1879 }
1da177e4 1880
8fc9ced3
GP
1881 /* for unlimited number of responses we will use buffer with
1882 * 255 entries
1883 */
1da177e4
LT
1884 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1885
1886 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1887 * copy it to the user space.
1888 */
01df8c31 1889 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1890 if (!buf) {
1da177e4
LT
1891 err = -ENOMEM;
1892 goto done;
1893 }
1894
09fd0de5 1895 hci_dev_lock(hdev);
1da177e4 1896 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1897 hci_dev_unlock(hdev);
1da177e4
LT
1898
1899 BT_DBG("num_rsp %d", ir.num_rsp);
1900
1901 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1902 ptr += sizeof(ir);
1903 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1904 ir.num_rsp))
1da177e4 1905 err = -EFAULT;
8e87d142 1906 } else
1da177e4
LT
1907 err = -EFAULT;
1908
1909 kfree(buf);
1910
1911done:
1912 hci_dev_put(hdev);
1913 return err;
1914}
1915
/* Power on and initialize an HCI device.  Serializes against other
 * open/close operations via the request lock and fully unwinds
 * (flush work, purge queues, transport close) if initialization
 * fails.  Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Transport-level open (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial power on */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw and user-channel devices skip the staged HCI init */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2022
/* ---- HCI ioctl helpers ---- */

/* Handle the HCIDEVUP ioctl: bring up the device with index @dev.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2054
/* Power down an HCI device: cancel pending work, flush queues and
 * timers, optionally reset the controller, close the transport and
 * notify mgmt.  Counterpart of hci_dev_do_open().  Returns 0 once the
 * device is down (also when it already was).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Tear down a running discoverable timeout */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2152
/* Power down the adapter identified by @dev on behalf of userspace.
 *
 * Resolves the index to an hci_dev (taking a reference), refuses devices
 * that are exclusively owned by a user channel, cancels any pending
 * delayed auto power-off, and delegates the actual teardown to
 * hci_dev_do_close().
 *
 * Returns 0 on success, -ENODEV if the index is unknown, -EBUSY if a
 * user channel owns the device, or the error from hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);	/* takes a reference; dropped below */
	if (!hdev)
		return -ENODEV;

	/* A user-channel owner has exclusive control of this device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* An explicit close supersedes a scheduled auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2176
/* Soft-reset the adapter identified by @dev: flush all pending traffic
 * and caches and (unless in raw mode) issue an HCI Reset to the
 * controller.  The device must already be up.
 *
 * Locking: takes hdev->req_lock for the whole operation and hdev->lock
 * only around the cache/connection flush.
 *
 * Returns 0 on success, -ENODEV/-ENETDOWN/-EBUSY on the respective
 * failures, or the result of the synchronous reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* A user-channel owner has exclusive control of this device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command/packet accounting to its post-reset state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2221
2222int hci_dev_reset_stat(__u16 dev)
2223{
2224 struct hci_dev *hdev;
2225 int ret = 0;
2226
70f23020
AE
2227 hdev = hci_dev_get(dev);
2228 if (!hdev)
1da177e4
LT
2229 return -ENODEV;
2230
0736cfa8
MH
2231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 ret = -EBUSY;
2233 goto done;
2234 }
2235
1da177e4
LT
2236 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2237
0736cfa8 2238done:
1da177e4 2239 hci_dev_put(hdev);
1da177e4
LT
2240 return ret;
2241}
2242
/* Handle the HCISET* device-configuration commands from userspace.
 *
 * @cmd: one of the HCISET* command codes
 * @arg: user pointer to a struct hci_dev_req carrying the device index
 *       and the command-specific option word
 *
 * Only plain BR/EDR devices that are not bound to a user channel are
 * accepted.  Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user-channel owner has exclusive control of this device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These legacy settings only make sense for BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Pure software setting, no command to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2333
/* Copy the list of registered HCI devices (id + flags) to userspace.
 *
 * @arg points to a struct hci_dev_list_req whose first __u16 is the
 * capacity requested by the caller; at most that many entries (and at
 * most 2 pages worth) are returned, with the actual count written back.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation driven by the user-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as activity: abort auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users expect devices to be pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2380
/* Fill a struct hci_dev_info for one device and copy it to userspace.
 *
 * @arg points to a struct hci_dev_info whose dev_id selects the device;
 * the same buffer receives the result.  For LE-only controllers the ACL
 * fields report the LE buffer parameters and the SCO fields are zeroed.
 *
 * Returns 0 on success, -EFAULT on copy failure, -ENODEV for an
 * unknown index.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: abort auto-off.
	 * The _sync variant is safe here since no locks are held. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users expect devices to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus type in the low nibble, device type in the next */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: reuse the ACL fields for the LE buffers */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2429
2430/* ---- Interface to HCI drivers ---- */
2431
611b30f7
MH
/* rfkill callback: block or unblock the radio of @data (an hci_dev).
 *
 * Blocking closes the device immediately unless it is still in its
 * setup phase (the HCI_RFKILLED flag is checked again after setup).
 * A device bound to a user channel cannot be blocked (-EBUSY).
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		/* During setup the flag alone is enough; hci_power_on
		 * re-checks it and powers the device back off. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
2451
/* rfkill integration: only the block/unblock operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2455
ab81cbf9
JH
/* Deferred power-on handler (hdev->power_on work item).
 *
 * Opens the device and then re-validates conditions that were ignored
 * during setup: an rfkill block, or a BR/EDR controller without any
 * usable address.  If one still holds, the device is closed again.
 * Devices powered on automatically get a delayed auto power-off armed.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off unless userspace claims the device soon */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2487
/* Deferred power-off handler (hdev->power_off delayed work); simply
 * closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2497
16ab91ab
JH
2498static void hci_discov_off(struct work_struct *work)
2499{
2500 struct hci_dev *hdev;
16ab91ab
JH
2501
2502 hdev = container_of(work, struct hci_dev, discov_off.work);
2503
2504 BT_DBG("%s", hdev->name);
2505
d1967ff8 2506 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2507}
2508
2aeb9a1a
JH
2509int hci_uuids_clear(struct hci_dev *hdev)
2510{
4821002c 2511 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2512
4821002c
JH
2513 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2514 list_del(&uuid->list);
2aeb9a1a
JH
2515 kfree(uuid);
2516 }
2517
2518 return 0;
2519}
2520
55ed8ca1
JH
2521int hci_link_keys_clear(struct hci_dev *hdev)
2522{
2523 struct list_head *p, *n;
2524
2525 list_for_each_safe(p, n, &hdev->link_keys) {
2526 struct link_key *key;
2527
2528 key = list_entry(p, struct link_key, list);
2529
2530 list_del(p);
2531 kfree(key);
2532 }
2533
2534 return 0;
2535}
2536
b899efaf
VCG
2537int hci_smp_ltks_clear(struct hci_dev *hdev)
2538{
2539 struct smp_ltk *k, *tmp;
2540
2541 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2542 list_del(&k->list);
2543 kfree(k);
2544 }
2545
2546 return 0;
2547}
2548
970c4e46
JH
2549void hci_smp_irks_clear(struct hci_dev *hdev)
2550{
2551 struct smp_irk *k, *tmp;
2552
2553 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2554 list_del(&k->list);
2555 kfree(k);
2556 }
2557}
2558
55ed8ca1
JH
2559struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2560{
8035ded4 2561 struct link_key *k;
55ed8ca1 2562
8035ded4 2563 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2564 if (bacmp(bdaddr, &k->bdaddr) == 0)
2565 return k;
55ed8ca1
JH
2566
2567 return NULL;
2568}
2569
/* Decide whether a newly created link key should be stored
 * persistently (and reported to userspace as such).
 *
 * @conn may be NULL (security mode 3 pairing); @old_key_type is 0xff
 * when no previous key existed.  Returns true when the key should
 * survive the connection.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2605
98a0b845
JH
2606static bool ltk_type_master(u8 type)
2607{
2608 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2609 return true;
2610
2611 return false;
2612}
2613
2614struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2615 bool master)
75d262c2 2616{
c9839a11 2617 struct smp_ltk *k;
75d262c2 2618
c9839a11
VCG
2619 list_for_each_entry(k, &hdev->long_term_keys, list) {
2620 if (k->ediv != ediv ||
a8c5fb1a 2621 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2622 continue;
2623
98a0b845
JH
2624 if (ltk_type_master(k->type) != master)
2625 continue;
2626
c9839a11 2627 return k;
75d262c2
VCG
2628 }
2629
2630 return NULL;
2631}
75d262c2 2632
c9839a11 2633struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2634 u8 addr_type, bool master)
75d262c2 2635{
c9839a11 2636 struct smp_ltk *k;
75d262c2 2637
c9839a11
VCG
2638 list_for_each_entry(k, &hdev->long_term_keys, list)
2639 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2640 bacmp(bdaddr, &k->bdaddr) == 0 &&
2641 ltk_type_master(k->type) == master)
75d262c2
VCG
2642 return k;
2643
2644 return NULL;
2645}
75d262c2 2646
970c4e46
JH
/* Resolve a resolvable private address to its identity resolving key.
 *
 * First tries a cheap match against the RPA cached in each IRK entry;
 * only if that fails does it run the AES-based resolution against every
 * stored IRK value, caching the RPA on a hit so the next lookup is
 * cheap.  Returns the matching IRK or NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: RPA already resolved and cached */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: run the crypto match against each stored IRK */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);	/* cache for next time */
			return irk;
		}
	}

	return NULL;
}
2665
2666struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2667 u8 addr_type)
2668{
2669 struct smp_irk *irk;
2670
2671 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2672 if (addr_type == irk->addr_type &&
2673 bacmp(bdaddr, &irk->bdaddr) == 0)
2674 return irk;
2675 }
2676
2677 return NULL;
2678}
2679
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL; @new_key distinguishes a freshly created key (which
 * is reported to the management interface) from a key merely re-loaded.
 * Changed-combination keys keep the previous key's type so its original
 * security properties are preserved.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;		/* update in place */
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key inherits the old key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2732
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 *
 * An existing entry with the same address, type and role is updated in
 * place.  When @new_key is set, LTKs (but not STKs) are reported to the
 * management interface; keys for a non-static random address are marked
 * non-persistent.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;		/* update in place */
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* A random address that is not static (top two bits != 11) is
	 * not a stable identity, so the key cannot be persistent. */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	/* Only real LTKs (not short term keys) are worth reporting */
	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
2773
970c4e46
JH
2774int hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type,
2775 u8 val[16], bdaddr_t *rpa)
2776{
2777 struct smp_irk *irk;
2778
2779 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2780 if (!irk) {
2781 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2782 if (!irk)
2783 return -ENOMEM;
2784
2785 bacpy(&irk->bdaddr, bdaddr);
2786 irk->addr_type = addr_type;
2787
2788 list_add(&irk->list, &hdev->identity_resolving_keys);
2789 }
2790
2791 memcpy(irk->val, val, 16);
2792 bacpy(&irk->rpa, rpa);
2793
2794 return 0;
2795}
2796
55ed8ca1
JH
2797int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2798{
2799 struct link_key *key;
2800
2801 key = hci_find_link_key(hdev, bdaddr);
2802 if (!key)
2803 return -ENOENT;
2804
6ed93dc6 2805 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2806
2807 list_del(&key->list);
2808 kfree(key);
2809
2810 return 0;
2811}
2812
b899efaf
VCG
2813int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2814{
2815 struct smp_ltk *k, *tmp;
2816
2817 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2818 if (bacmp(bdaddr, &k->bdaddr))
2819 continue;
2820
6ed93dc6 2821 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2822
2823 list_del(&k->list);
2824 kfree(k);
2825 }
2826
2827 return 0;
2828}
2829
6bd32326 2830/* HCI command timer function */
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time.  Logs the stuck opcode (if the
 * command skb is still around), then force-resets the command credit
 * and kicks the command work so the queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Pretend the credit came back so the next command can go out */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2847
2763eda6 2848struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2849 bdaddr_t *bdaddr)
2763eda6
SJ
2850{
2851 struct oob_data *data;
2852
2853 list_for_each_entry(data, &hdev->remote_oob_data, list)
2854 if (bacmp(bdaddr, &data->bdaddr) == 0)
2855 return data;
2856
2857 return NULL;
2858}
2859
2860int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2861{
2862 struct oob_data *data;
2863
2864 data = hci_find_remote_oob_data(hdev, bdaddr);
2865 if (!data)
2866 return -ENOENT;
2867
6ed93dc6 2868 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2869
2870 list_del(&data->list);
2871 kfree(data);
2872
2873 return 0;
2874}
2875
2876int hci_remote_oob_data_clear(struct hci_dev *hdev)
2877{
2878 struct oob_data *data, *n;
2879
2880 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2881 list_del(&data->list);
2882 kfree(data);
2883 }
2884
2885 return 0;
2886}
2887
0798872e
MH
/* Store (or update) legacy P-192 remote OOB data for @bdaddr.
 *
 * The P-256 fields of the entry are cleared since only the 192-bit
 * hash/randomizer pair is supplied here; see
 * hci_add_remote_oob_ext_data() for the extended variant.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* No P-256 values provided in the legacy variant */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2913
/* Store (or update) extended remote OOB data (both the P-192 and P-256
 * hash/randomizer pairs) for @bdaddr.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2940
b9ee0a78
MH
2941struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2942 bdaddr_t *bdaddr, u8 type)
b2a66aad 2943{
8035ded4 2944 struct bdaddr_list *b;
b2a66aad 2945
b9ee0a78
MH
2946 list_for_each_entry(b, &hdev->blacklist, list) {
2947 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2948 return b;
b9ee0a78 2949 }
b2a66aad
AJ
2950
2951 return NULL;
2952}
2953
2954int hci_blacklist_clear(struct hci_dev *hdev)
2955{
2956 struct list_head *p, *n;
2957
2958 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2959 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2960
2961 list_del(p);
2962 kfree(b);
2963 }
2964
2965 return 0;
2966}
2967
/* Add @bdaddr/@type to the device blacklist and notify the management
 * interface.
 *
 * Returns the mgmt notification result on success, -EBADF for the
 * wildcard address, -EEXIST for a duplicate, or -ENOMEM.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is reserved (it means "clear" on delete) */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2989
/* Remove @bdaddr/@type from the device blacklist and notify the
 * management interface.  Passing BDADDR_ANY clears the whole list.
 *
 * Returns the mgmt notification result on success, -ENOENT if the
 * entry does not exist, or the result of hci_blacklist_clear().
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard address means "remove everything" */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
3006
15819a70
AG
3007/* This function requires the caller holds hdev->lock */
3008struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3009 bdaddr_t *addr, u8 addr_type)
3010{
3011 struct hci_conn_params *params;
3012
3013 list_for_each_entry(params, &hdev->le_conn_params, list) {
3014 if (bacmp(&params->addr, addr) == 0 &&
3015 params->addr_type == addr_type) {
3016 return params;
3017 }
3018 }
3019
3020 return NULL;
3021}
3022
3023/* This function requires the caller holds hdev->lock */
3024void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3025 u16 conn_min_interval, u16 conn_max_interval)
3026{
3027 struct hci_conn_params *params;
3028
3029 params = hci_conn_params_lookup(hdev, addr, addr_type);
3030 if (params) {
3031 params->conn_min_interval = conn_min_interval;
3032 params->conn_max_interval = conn_max_interval;
3033 return;
3034 }
3035
3036 params = kzalloc(sizeof(*params), GFP_KERNEL);
3037 if (!params) {
3038 BT_ERR("Out of memory");
3039 return;
3040 }
3041
3042 bacpy(&params->addr, addr);
3043 params->addr_type = addr_type;
3044 params->conn_min_interval = conn_min_interval;
3045 params->conn_max_interval = conn_max_interval;
3046
3047 list_add(&params->list, &hdev->le_conn_params);
3048
3049 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3050 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3051 conn_max_interval);
3052}
3053
3054/* This function requires the caller holds hdev->lock */
3055void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3056{
3057 struct hci_conn_params *params;
3058
3059 params = hci_conn_params_lookup(hdev, addr, addr_type);
3060 if (!params)
3061 return;
3062
3063 list_del(&params->list);
3064 kfree(params);
3065
3066 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3067}
3068
3069/* This function requires the caller holds hdev->lock */
3070void hci_conn_params_clear(struct hci_dev *hdev)
3071{
3072 struct hci_conn_params *params, *tmp;
3073
3074 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3075 list_del(&params->list);
3076 kfree(params);
3077 }
3078
3079 BT_DBG("All LE connection parameters were removed");
3080}
3081
4c87eaab 3082static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3083{
4c87eaab
AG
3084 if (status) {
3085 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3086
4c87eaab
AG
3087 hci_dev_lock(hdev);
3088 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3089 hci_dev_unlock(hdev);
3090 return;
3091 }
7ba8b4be
AG
3092}
3093
/* Completion callback for the LE scan disable request.
 *
 * For a pure LE discovery, stopping the scan ends the discovery.  For
 * interleaved discovery, the BR/EDR inquiry phase is started next.
 * Other discovery types are deliberately left unhandled here.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is finished once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Follow up with the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3136
7ba8b4be
AG
/* Delayed-work handler (hdev->le_scan_disable) that turns LE scanning
 * off by submitting an asynchronous LE Set Scan Enable request; the
 * follow-up logic lives in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3157
9be0dab7
DH
3158/* Alloc HCI device */
3159struct hci_dev *hci_alloc_dev(void)
3160{
3161 struct hci_dev *hdev;
3162
3163 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3164 if (!hdev)
3165 return NULL;
3166
b1b813d4
DH
3167 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3168 hdev->esco_type = (ESCO_HV1);
3169 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3170 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3171 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3172 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3173 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3174
b1b813d4
DH
3175 hdev->sniff_max_interval = 800;
3176 hdev->sniff_min_interval = 80;
3177
bef64738
MH
3178 hdev->le_scan_interval = 0x0060;
3179 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3180 hdev->le_conn_min_interval = 0x0028;
3181 hdev->le_conn_max_interval = 0x0038;
bef64738 3182
b1b813d4
DH
3183 mutex_init(&hdev->lock);
3184 mutex_init(&hdev->req_lock);
3185
3186 INIT_LIST_HEAD(&hdev->mgmt_pending);
3187 INIT_LIST_HEAD(&hdev->blacklist);
3188 INIT_LIST_HEAD(&hdev->uuids);
3189 INIT_LIST_HEAD(&hdev->link_keys);
3190 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3191 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3192 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3193 INIT_LIST_HEAD(&hdev->le_conn_params);
6b536b5e 3194 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3195
3196 INIT_WORK(&hdev->rx_work, hci_rx_work);
3197 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3198 INIT_WORK(&hdev->tx_work, hci_tx_work);
3199 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3200
b1b813d4
DH
3201 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3202 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3203 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3204
b1b813d4
DH
3205 skb_queue_head_init(&hdev->rx_q);
3206 skb_queue_head_init(&hdev->cmd_q);
3207 skb_queue_head_init(&hdev->raw_q);
3208
3209 init_waitqueue_head(&hdev->req_wait_q);
3210
bda4f23a 3211 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3212
b1b813d4
DH
3213 hci_init_sysfs(hdev);
3214 discovery_init(hdev);
9be0dab7
DH
3215
3216 return hdev;
3217}
3218EXPORT_SYMBOL(hci_alloc_dev);
3219
3220/* Free HCI device */
3221void hci_free_dev(struct hci_dev *hdev)
3222{
9be0dab7
DH
3223 /* will free via device release */
3224 put_device(&hdev->dev);
3225}
3226EXPORT_SYMBOL(hci_free_dev);
3227
1da177e4
LT
/* Register HCI device.
 *
 * Allocates an index, creates the work queues, crypto context, sysfs
 * device and rfkill switch, then publishes the device on hci_dev_list
 * and schedules the initial power-on.
 *
 * Returns the assigned controller index on success or a negative errno.
 * On failure everything allocated so far is unwound in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		/* err_wqueue would also destroy req_workqueue (NULL here),
		 * so tear down the first queue inline and take err instead.
		 */
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES context used by SMP for address resolution etc. */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	/* Reference released in hci_unregister_dev() */
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3336
/* Unregister HCI device.
 *
 * Reverse of hci_register_dev(): unlinks the device from hci_dev_list,
 * shuts it down, notifies mgmt, releases rfkill/crypto/sysfs/debugfs
 * resources, clears all stored keys and parameters, and finally drops
 * the registration reference and frees the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stop other paths from re-arming work on this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal for controllers that finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Drop all persisted security material and parameters */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3402
/* Suspend HCI device.
 *
 * Only broadcasts HCI_DEV_SUSPEND to registered notifiers; no device
 * state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3410
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts HCI_DEV_RESUME to the
 * registered notifiers. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3418
76bca880 3419/* Receive frame from HCI drivers */
e1a26170 3420int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3421{
76bca880 3422 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3423 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3424 kfree_skb(skb);
3425 return -ENXIO;
3426 }
3427
d82603c6 3428 /* Incoming skb */
76bca880
MH
3429 bt_cb(skb)->incoming = 1;
3430
3431 /* Time stamp */
3432 __net_timestamp(skb);
3433
76bca880 3434 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3435 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3436
76bca880
MH
3437 return 0;
3438}
3439EXPORT_SYMBOL(hci_recv_frame);
3440
/* Reassemble a (possibly partial) HCI packet arriving in fragments.
 *
 * @hdev:  controller the data belongs to
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  fragment payload
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[]
 *
 * Keeps per-slot state in the skb's control block: scb->expect is the
 * number of bytes still needed (first the header, then the payload
 * length parsed from the header). A completed packet is passed to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First fragment: allocate a max-size skb for this type
		 * and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what is still expected */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header is in, learn the payload length.
		 * A length exceeding the skb tailroom means a malformed
		 * packet: drop it and reset the slot.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3548
ef222013
MH
3549int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3550{
f39a3c06
SS
3551 int rem = 0;
3552
ef222013
MH
3553 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3554 return -EILSEQ;
3555
da5f6c37 3556 while (count) {
1e429f38 3557 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3558 if (rem < 0)
3559 return rem;
ef222013 3560
f39a3c06
SS
3561 data += (count - rem);
3562 count = rem;
f81c6224 3563 }
ef222013 3564
f39a3c06 3565 return rem;
ef222013
MH
3566}
3567EXPORT_SYMBOL(hci_recv_fragment);
3568
99811510
SS
/* Slot index used for byte-stream reassembly (e.g. UART transports) */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the reassembler.
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * when no packet is in progress, the first byte of the stream is the
 * HCI packet-type indicator; otherwise the type of the in-progress
 * packet is reused. All stream data shares one reassembly slot.
 *
 * Returns leftover byte count (normally 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */

			pkt = data;
			type = pkt->type;

			/* Consume the type indicator byte */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3603
1da177e4
LT
3604/* ---- Interface to upper protocols ---- */
3605
1da177e4
LT
/* Register an upper-protocol callback structure on the global
 * hci_cb_list under the writer lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3617
/* Remove a previously registered upper-protocol callback structure
 * from hci_cb_list under the writer lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3629
/* Hand one outgoing packet to the transport driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode) before passing it to the driver's
 * send() hook. Ownership of the skb moves to the driver.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3119ae95
JH
3652void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3653{
3654 skb_queue_head_init(&req->cmd_q);
3655 req->hdev = hdev;
5d73e034 3656 req->err = 0;
3119ae95
JH
3657}
3658
/* Submit a built request for execution.
 *
 * @complete is attached to the last queued command so it fires when the
 * whole request has finished. The request's commands are spliced onto
 * the device command queue atomically and cmd_work is kicked.
 *
 * Returns 0 on success, the builder's error if one was recorded, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion callback rides on the final command of the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3690
1ca3a9d0 3691static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3692 u32 plen, const void *param)
1da177e4
LT
3693{
3694 int len = HCI_COMMAND_HDR_SIZE + plen;
3695 struct hci_command_hdr *hdr;
3696 struct sk_buff *skb;
3697
1da177e4 3698 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3699 if (!skb)
3700 return NULL;
1da177e4
LT
3701
3702 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3703 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3704 hdr->plen = plen;
3705
3706 if (plen)
3707 memcpy(skb_put(skb, plen), param, plen);
3708
3709 BT_DBG("skb len %d", skb->len);
3710
0d48d939 3711 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3712
1ca3a9d0
JH
3713 return skb;
3714}
3715
3716/* Send HCI command */
07dc93dd
JH
3717int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3718 const void *param)
1ca3a9d0
JH
3719{
3720 struct sk_buff *skb;
3721
3722 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3723
3724 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3725 if (!skb) {
3726 BT_ERR("%s no memory for command", hdev->name);
3727 return -ENOMEM;
3728 }
3729
11714b3d
JH
3730 /* Stand-alone HCI commands must be flaged as
3731 * single-command requests.
3732 */
3733 bt_cb(skb)->req.start = true;
3734
1da177e4 3735 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3736 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3737
3738 return 0;
3739}
1da177e4 3740
/* Queue a command to an asynchronous HCI request */
/*
 * Like hci_req_add() but also records @event as the event expected to
 * complete this command (0 means the usual Command Complete/Status).
 * On allocation failure the error is latched in req->err so that
 * hci_req_run() can reject the whole request; nothing is queued after
 * a previous builder error.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* First command of the request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3771
/* Queue a command to an asynchronous HCI request, expecting the default
 * Command Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3777
1da177e4 3778/* Get data from the previously sent command */
a9de9248 3779void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3780{
3781 struct hci_command_hdr *hdr;
3782
3783 if (!hdev->sent_cmd)
3784 return NULL;
3785
3786 hdr = (void *) hdev->sent_cmd->data;
3787
a9de9248 3788 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3789 return NULL;
3790
f0e09510 3791 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3792
3793 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3794}
3795
/* Send ACL data */
/* Prepend an ACL header (handle+flags, payload length) to @skb.
 * The original skb length must be captured before skb_push() grows it.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3808
/* Queue an ACL packet (with fragments, if any) on a channel data queue.
 *
 * The head skb gets an ACL header with the ACL_START flags; any skbs on
 * its frag_list are queued behind it with ACL_CONT. All fragments are
 * queued atomically so the TX scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP links address data by logical-channel handle, BR/EDR by
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even when the head fragment used
			 * chan->handle on HCI_AMP above — confirm this is
			 * intentional for AMP controllers.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3866
/* Queue ACL data on the channel's data queue and kick the TX worker,
 * which performs the actual scheduling and transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3877
/* Send SCO data */
/* Prepend a SCO header and queue the packet on the connection's data
 * queue for the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): dlen assignment narrows skb->len to the header
	 * field width — presumably callers never exceed the SCO MTU;
	 * confirm against sco_send_frame().
	 */
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3898
3899/* ---- HCI TX task (outgoing data) ---- */
3900
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (round-robin by "least sent"), and compute its
 * fair-share quota from the controller's free buffer count.
 *
 * Returns the chosen connection (or NULL) and writes the quota (at
 * least 1 when a connection is found, else 0) to *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when it has no dedicated ones */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3961
/* Link TX watchdog: the controller stopped returning buffer credits.
 * Disconnect every connection of @type that still has unacked packets
 * so their buffers can be reclaimed.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3982
6039aa73
GP
3983static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3984 int *quote)
1da177e4 3985{
73d80deb
LAD
3986 struct hci_conn_hash *h = &hdev->conn_hash;
3987 struct hci_chan *chan = NULL;
abc5de8f 3988 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3989 struct hci_conn *conn;
73d80deb
LAD
3990 int cnt, q, conn_num = 0;
3991
3992 BT_DBG("%s", hdev->name);
3993
bf4c6325
GP
3994 rcu_read_lock();
3995
3996 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3997 struct hci_chan *tmp;
3998
3999 if (conn->type != type)
4000 continue;
4001
4002 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4003 continue;
4004
4005 conn_num++;
4006
8192edef 4007 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4008 struct sk_buff *skb;
4009
4010 if (skb_queue_empty(&tmp->data_q))
4011 continue;
4012
4013 skb = skb_peek(&tmp->data_q);
4014 if (skb->priority < cur_prio)
4015 continue;
4016
4017 if (skb->priority > cur_prio) {
4018 num = 0;
4019 min = ~0;
4020 cur_prio = skb->priority;
4021 }
4022
4023 num++;
4024
4025 if (conn->sent < min) {
4026 min = conn->sent;
4027 chan = tmp;
4028 }
4029 }
4030
4031 if (hci_conn_num(hdev, type) == conn_num)
4032 break;
4033 }
4034
bf4c6325
GP
4035 rcu_read_unlock();
4036
73d80deb
LAD
4037 if (!chan)
4038 return NULL;
4039
4040 switch (chan->conn->type) {
4041 case ACL_LINK:
4042 cnt = hdev->acl_cnt;
4043 break;
bd1eb66b
AE
4044 case AMP_LINK:
4045 cnt = hdev->block_cnt;
4046 break;
73d80deb
LAD
4047 case SCO_LINK:
4048 case ESCO_LINK:
4049 cnt = hdev->sco_cnt;
4050 break;
4051 case LE_LINK:
4052 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4053 break;
4054 default:
4055 cnt = 0;
4056 BT_ERR("Unknown link type");
4057 }
4058
4059 q = cnt / num;
4060 *quote = q ? q : 1;
4061 BT_DBG("chan %p quote %d", chan, *quote);
4062 return chan;
4063}
4064
/* Anti-starvation pass after a TX round.
 *
 * For every channel of @type that sent nothing this round (sent == 0)
 * but still has queued data, promote its head skb to HCI_PRIO_MAX - 1
 * so it wins the next election. Channels that did send get their
 * per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4114
/* Number of controller data blocks needed for one ACL packet's payload
 * (block-based flow control; the ACL header is not counted).
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
4120
/* Trigger the ACL link watchdog when the controller has returned no
 * buffer credits (cnt == 0) since before the timeout window. Skipped
 * entirely in raw mode where the stack does not manage flow control.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4131
/* Packet-based ACL TX scheduling: repeatedly elect the best channel and
 * drain up to its quota, stopping a channel early if a lower-priority
 * skb reaches the head of its queue. Runs the starvation pass when
 * anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4169
/* Block-based ACL TX scheduling (data-block flow control, used by AMP
 * controllers). Like hci_sched_acl_pkt() but accounting is done in
 * controller blocks rather than whole packets; a packet larger than
 * the remaining block budget ends the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP_LINK traffic */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4223
6039aa73 4224static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4225{
4226 BT_DBG("%s", hdev->name);
4227
bd1eb66b
AE
4228 /* No ACL link over BR/EDR controller */
4229 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4230 return;
4231
4232 /* No AMP link over AMP controller */
4233 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4234 return;
4235
4236 switch (hdev->flow_ctl_mode) {
4237 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4238 hci_sched_acl_pkt(hdev);
4239 break;
4240
4241 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4242 hci_sched_acl_blk(hdev);
4243 break;
4244 }
4245}
4246
1da177e4 4247/* Schedule SCO */
6039aa73 4248static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4249{
4250 struct hci_conn *conn;
4251 struct sk_buff *skb;
4252 int quote;
4253
4254 BT_DBG("%s", hdev->name);
4255
52087a79
LAD
4256 if (!hci_conn_num(hdev, SCO_LINK))
4257 return;
4258
1da177e4
LT
4259 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4260 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4261 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4262 hci_send_frame(hdev, skb);
1da177e4
LT
4263
4264 conn->sent++;
4265 if (conn->sent == ~0)
4266 conn->sent = 0;
4267 }
4268 }
4269}
4270
/* eSCO TX scheduling: identical to hci_sched_sco() but for ESCO_LINK
 * connections; shares the SCO buffer credit pool (sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4294
/* LE TX scheduling.
 *
 * Uses the dedicated LE buffer pool when the controller advertises one
 * (le_pkts/le_mtu), otherwise borrows ACL credits. Includes its own
 * stall watchdog since __check_timeout() only covers ACL.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4345
/* TX worker: run all per-link-type schedulers (unless the device is in
 * user-channel mode, where userspace drives the controller directly),
 * then flush any raw packets queued for direct transmission.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4366
25985edc 4367/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4368
4369/* ACL data packet */
6039aa73 4370static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4371{
4372 struct hci_acl_hdr *hdr = (void *) skb->data;
4373 struct hci_conn *conn;
4374 __u16 handle, flags;
4375
4376 skb_pull(skb, HCI_ACL_HDR_SIZE);
4377
4378 handle = __le16_to_cpu(hdr->handle);
4379 flags = hci_flags(handle);
4380 handle = hci_handle(handle);
4381
f0e09510 4382 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4383 handle, flags);
1da177e4
LT
4384
4385 hdev->stat.acl_rx++;
4386
4387 hci_dev_lock(hdev);
4388 conn = hci_conn_hash_lookup_handle(hdev, handle);
4389 hci_dev_unlock(hdev);
8e87d142 4390
1da177e4 4391 if (conn) {
65983fc7 4392 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4393
1da177e4 4394 /* Send to upper protocol */
686ebf28
UF
4395 l2cap_recv_acldata(conn, skb, flags);
4396 return;
1da177e4 4397 } else {
8e87d142 4398 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4399 hdev->name, handle);
1da177e4
LT
4400 }
4401
4402 kfree_skb(skb);
4403}
4404
4405/* SCO data packet */
6039aa73 4406static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4407{
4408 struct hci_sco_hdr *hdr = (void *) skb->data;
4409 struct hci_conn *conn;
4410 __u16 handle;
4411
4412 skb_pull(skb, HCI_SCO_HDR_SIZE);
4413
4414 handle = __le16_to_cpu(hdr->handle);
4415
f0e09510 4416 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4417
4418 hdev->stat.sco_rx++;
4419
4420 hci_dev_lock(hdev);
4421 conn = hci_conn_hash_lookup_handle(hdev, handle);
4422 hci_dev_unlock(hdev);
4423
4424 if (conn) {
1da177e4 4425 /* Send to upper protocol */
686ebf28
UF
4426 sco_recv_scodata(conn, skb);
4427 return;
1da177e4 4428 } else {
8e87d142 4429 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4430 hdev->name, handle);
1da177e4
LT
4431 }
4432
4433 kfree_skb(skb);
4434}
4435
9238f36a
JH
4436static bool hci_req_is_complete(struct hci_dev *hdev)
4437{
4438 struct sk_buff *skb;
4439
4440 skb = skb_peek(&hdev->cmd_q);
4441 if (!skb)
4442 return true;
4443
4444 return bt_cb(skb)->req.start;
4445}
4446
42c6b129
JH
4447static void hci_resend_last(struct hci_dev *hdev)
4448{
4449 struct hci_command_hdr *sent;
4450 struct sk_buff *skb;
4451 u16 opcode;
4452
4453 if (!hdev->sent_cmd)
4454 return;
4455
4456 sent = (void *) hdev->sent_cmd->data;
4457 opcode = __le16_to_cpu(sent->opcode);
4458 if (opcode == HCI_OP_RESET)
4459 return;
4460
4461 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4462 if (!skb)
4463 return;
4464
4465 skb_queue_head(&hdev->cmd_q, skb);
4466 queue_work(hdev->workqueue, &hdev->cmd_work);
4467}
4468
/* Handle completion of an HCI command that may belong to a multi-command
 * request. Invokes the request's completion callback (at most once) when
 * the request has finished, and on failure flushes the request's
 * remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * NOTE(review): cmd_q is protected with irqsave locking here —
	 * presumably it can be touched from interrupt context; confirm
	 * against the rest of the file.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the *next* request and
		 * put it back so it can be processed normally.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last seen callback; it belongs to the
		 * request being flushed.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4534
b78752cc 4535static void hci_rx_work(struct work_struct *work)
1da177e4 4536{
b78752cc 4537 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4538 struct sk_buff *skb;
4539
4540 BT_DBG("%s", hdev->name);
4541
1da177e4 4542 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4543 /* Send copy to monitor */
4544 hci_send_to_monitor(hdev, skb);
4545
1da177e4
LT
4546 if (atomic_read(&hdev->promisc)) {
4547 /* Send copy to the sockets */
470fe1b5 4548 hci_send_to_sock(hdev, skb);
1da177e4
LT
4549 }
4550
0736cfa8
MH
4551 if (test_bit(HCI_RAW, &hdev->flags) ||
4552 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4553 kfree_skb(skb);
4554 continue;
4555 }
4556
4557 if (test_bit(HCI_INIT, &hdev->flags)) {
4558 /* Don't process data packets in this states. */
0d48d939 4559 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4560 case HCI_ACLDATA_PKT:
4561 case HCI_SCODATA_PKT:
4562 kfree_skb(skb);
4563 continue;
3ff50b79 4564 }
1da177e4
LT
4565 }
4566
4567 /* Process frame */
0d48d939 4568 switch (bt_cb(skb)->pkt_type) {
1da177e4 4569 case HCI_EVENT_PKT:
b78752cc 4570 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4571 hci_event_packet(hdev, skb);
4572 break;
4573
4574 case HCI_ACLDATA_PKT:
4575 BT_DBG("%s ACL data packet", hdev->name);
4576 hci_acldata_packet(hdev, skb);
4577 break;
4578
4579 case HCI_SCODATA_PKT:
4580 BT_DBG("%s SCO data packet", hdev->name);
4581 hci_scodata_packet(hdev, skb);
4582 break;
4583
4584 default:
4585 kfree_skb(skb);
4586 break;
4587 }
4588 }
1da177e4
LT
4589}
4590
/* CMD work item: send the next queued HCI command if the controller has
 * command credits, keeping a clone in hdev->sent_cmd for completion
 * matching and arming the command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command; its completion has
		 * already been consumed (or it is being superseded).
		 */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the completion handler can inspect
		 * the opcode/parameters of the command in flight.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During a reset no timeout is enforced;
			 * otherwise (re)arm the command timer.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry
			 * from a fresh work invocation.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}