/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
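
/* A quick way to exercise this from userspace, assuming debugfs is
 * mounted in its usual place (the paths below are illustrative):
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing Y sends HCI_OP_ENABLE_DUT_MODE; writing N issues HCI_OP_RESET,
 * since a reset is the defined way to leave Device Under Test mode.
 */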

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");
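
/* The idle timeout is in milliseconds: zero disables it, any other
 * value must lie between 500 ms and 3600000 ms (one hour), as the
 * range check above enforces.
 */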

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
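
/* Sniff intervals are counted in baseband slots of 0.625 ms. The
 * setters reject odd values (an even slot count keeps the interval a
 * whole number of 1.25 ms frames) and keep min <= max consistent.
 */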

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
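
/* LE connection interval values are in units of 1.25 ms, so the range
 * 0x0006 to 0x0c80 accepted above corresponds to 7.5 ms (6 * 1.25 ms)
 * up to 4 s (3200 * 1.25 ms), the limits the Bluetooth specification
 * defines for LE connection parameters.
 */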

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
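
/* Synchronous requests park the caller on hdev->req_wait_q with
 * req_status set to HCI_REQ_PEND. The two helpers above are the only
 * places that flip the status (to DONE or CANCELED) and wake the
 * waiter; if neither runs before the timeout, the waiter falls through
 * to the -ETIMEDOUT case in __hci_req_sync() and __hci_cmd_sync_ev().
 */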

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
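
/* Typical caller pattern, a sketch modelled on dut_mode_write() above:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 *
 * The returned skb carries the Command Complete parameters, beginning
 * with the status byte, and must be freed by the caller.
 */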

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}
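
/* The connection accept timeout is counted in 0.625 ms baseband slots:
 * 0x7d00 is 32000 slots, and 32000 * 0.625 ms = 20 seconds, which is
 * where the "~20 secs" above comes from.
 */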

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
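
/* The value returned here feeds hci_setup_inquiry_mode() below and
 * follows the Write Inquiry Mode command: 0x00 standard inquiry
 * results, 0x01 results with RSSI, 0x02 results with RSSI or extended
 * inquiry result format. The manufacturer/revision special cases
 * cover controllers known to handle RSSI results even though they do
 * not advertise the corresponding LMP feature bit.
 */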

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
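
/* Bit n of the event mask (counting from bit 0 of events[0]) enables
 * the HCI event with code n + 1. As a worked example, events[4] |= 0x01
 * sets bit 32, which enables event 0x21 (Flow Specification Complete),
 * matching the per-bit comments above.
 */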

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
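
/* The default link policy is a bitmask of LMP modes the controller may
 * negotiate on new connections: role switch, hold mode, sniff mode and
 * park state. A bit is only set when the corresponding LMP feature is
 * actually reported by the controller.
 */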

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        /* If the controller has a public BD_ADDR, then
                         * by default use that one. If this is a LE only
                         * controller without a public address, default
                         * to the random address.
                         */
                        if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                                hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                        else
                                hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
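
/* This keeps the resolve list ordered by ascending abs(rssi), i.e.
 * strongest signal first, so name resolution is attempted for nearby
 * devices before distant ones. Entries in NAME_PENDING state keep
 * their place because a name request for them is already in flight.
 */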

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
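
/* The return value tells the caller whether the remote name can be
 * treated as known: false means the entry could not be allocated or
 * the name is still unknown, in which case the event handling code may
 * still need to resolve the name before reporting the device.
 */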

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}
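
/* wait_inquiry() is the action callback passed to wait_on_bit() in
 * hci_inquiry() below: it yields the CPU and reports whether a signal
 * arrived, in which case wait_on_bit() returns non-zero and the ioctl
 * bails out with -EINTR.
 */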

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
cbed0ca1
JH
1904/* ---- HCI ioctl helpers ---- */
1905
1906int hci_dev_open(__u16 dev)
1907{
1908 struct hci_dev *hdev;
1909 int err;
1910
1911 hdev = hci_dev_get(dev);
1912 if (!hdev)
1913 return -ENODEV;
1914
e1d08f40
JH
1915 /* We need to ensure that no other power on/off work is pending
1916 * before proceeding to call hci_dev_do_open. This is
1917 * particularly important if the setup procedure has not yet
1918 * completed.
1919 */
1920 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1921 cancel_delayed_work(&hdev->power_off);
1922
a5c8f270
MH
1923 /* After this call it is guaranteed that the setup procedure
1924 * has finished. This means that error conditions like RFKILL
1925 * or no valid public or static random address apply.
1926 */
e1d08f40
JH
1927 flush_workqueue(hdev->req_workqueue);
1928
cbed0ca1
JH
1929 err = hci_dev_do_open(hdev);
1930
1931 hci_dev_put(hdev);
1932
1933 return err;
1934}
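/* A hedged userspace sketch (not part of this file) of how hci_dev_open()
 * is reached: the HCIDEVUP ioctl on a raw HCI socket, as issued by tools
 * such as hciconfig. Error handling is trimmed for brevity.
 */
#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int bring_up_hci0(void)
{
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (dd < 0)
		return -1;

	/* dev_id 0 selects hci0; the kernel routes this to hci_dev_open() */
	if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY) {
		close(dd);
		return -1;
	}

	close(dd);
	return 0;
}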
1935
1da177e4
LT
1936static int hci_dev_do_close(struct hci_dev *hdev)
1937{
1938 BT_DBG("%s %p", hdev->name, hdev);
1939
78c04c0b
VCG
1940 cancel_delayed_work(&hdev->power_off);
1941
1da177e4
LT
1942 hci_req_cancel(hdev, ENODEV);
1943 hci_req_lock(hdev);
1944
1945 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 1946 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1947 hci_req_unlock(hdev);
1948 return 0;
1949 }
1950
3eff45ea
GP
1951 /* Flush RX and TX work items */
1952 flush_work(&hdev->tx_work);
b78752cc 1953 flush_work(&hdev->rx_work);
1da177e4 1954
16ab91ab 1955 if (hdev->discov_timeout > 0) {
e0f9309f 1956 cancel_delayed_work(&hdev->discov_off);
16ab91ab 1957 hdev->discov_timeout = 0;
5e5282bb 1958 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 1959 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
1960 }
1961
a8b2d5c2 1962 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
1963 cancel_delayed_work(&hdev->service_cache);
1964
7ba8b4be
AG
1965 cancel_delayed_work_sync(&hdev->le_scan_disable);
1966
09fd0de5 1967 hci_dev_lock(hdev);
1f9b9a5d 1968 hci_inquiry_cache_flush(hdev);
1da177e4 1969 hci_conn_hash_flush(hdev);
09fd0de5 1970 hci_dev_unlock(hdev);
1da177e4
LT
1971
1972 hci_notify(hdev, HCI_DEV_DOWN);
1973
1974 if (hdev->flush)
1975 hdev->flush(hdev);
1976
1977 /* Reset device */
1978 skb_queue_purge(&hdev->cmd_q);
1979 atomic_set(&hdev->cmd_cnt, 1);
8af59467 1980 if (!test_bit(HCI_RAW, &hdev->flags) &&
3a6afbd2 1981 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 1982 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 1983 set_bit(HCI_INIT, &hdev->flags);
01178cd4 1984 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
1985 clear_bit(HCI_INIT, &hdev->flags);
1986 }
1987
c347b765
GP
1988 /* flush cmd work */
1989 flush_work(&hdev->cmd_work);
1da177e4
LT
1990
1991 /* Drop queues */
1992 skb_queue_purge(&hdev->rx_q);
1993 skb_queue_purge(&hdev->cmd_q);
1994 skb_queue_purge(&hdev->raw_q);
1995
1996 /* Drop last sent command */
1997 if (hdev->sent_cmd) {
b79f44c1 1998 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
1999 kfree_skb(hdev->sent_cmd);
2000 hdev->sent_cmd = NULL;
2001 }
2002
b6ddb638
JH
2003 kfree_skb(hdev->recv_evt);
2004 hdev->recv_evt = NULL;
2005
1da177e4
LT
2006 /* After this point our queues are empty
2007 * and no tasks are scheduled. */
2008 hdev->close(hdev);
2009
35b973c9
JH
2010 /* Clear flags */
2011 hdev->flags = 0;
2012 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2013
93c311a0
MH
2014 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2015 if (hdev->dev_type == HCI_BREDR) {
2016 hci_dev_lock(hdev);
2017 mgmt_powered(hdev, 0);
2018 hci_dev_unlock(hdev);
2019 }
8ee56540 2020 }
5add6af8 2021
ced5c338 2022 /* Controller radio is available but is currently powered down */
536619e8 2023 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2024
e59fda8d 2025 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2026 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 2027
1da177e4
LT
2028 hci_req_unlock(hdev);
2029
2030 hci_dev_put(hdev);
2031 return 0;
2032}
2033
2034int hci_dev_close(__u16 dev)
2035{
2036 struct hci_dev *hdev;
2037 int err;
2038
70f23020
AE
2039 hdev = hci_dev_get(dev);
2040 if (!hdev)
1da177e4 2041 return -ENODEV;
8ee56540 2042
0736cfa8
MH
2043 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2044 err = -EBUSY;
2045 goto done;
2046 }
2047
8ee56540
MH
2048 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2049 cancel_delayed_work(&hdev->power_off);
2050
1da177e4 2051 err = hci_dev_do_close(hdev);
8ee56540 2052
0736cfa8 2053done:
1da177e4
LT
2054 hci_dev_put(hdev);
2055 return err;
2056}
2057
2058int hci_dev_reset(__u16 dev)
2059{
2060 struct hci_dev *hdev;
2061 int ret = 0;
2062
70f23020
AE
2063 hdev = hci_dev_get(dev);
2064 if (!hdev)
1da177e4
LT
2065 return -ENODEV;
2066
2067 hci_req_lock(hdev);
1da177e4 2068
808a049e
MH
2069 if (!test_bit(HCI_UP, &hdev->flags)) {
2070 ret = -ENETDOWN;
1da177e4 2071 goto done;
808a049e 2072 }
1da177e4 2073
0736cfa8
MH
2074 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2075 ret = -EBUSY;
2076 goto done;
2077 }
2078
1da177e4
LT
2079 /* Drop queues */
2080 skb_queue_purge(&hdev->rx_q);
2081 skb_queue_purge(&hdev->cmd_q);
2082
09fd0de5 2083 hci_dev_lock(hdev);
1f9b9a5d 2084 hci_inquiry_cache_flush(hdev);
1da177e4 2085 hci_conn_hash_flush(hdev);
09fd0de5 2086 hci_dev_unlock(hdev);
1da177e4
LT
2087
2088 if (hdev->flush)
2089 hdev->flush(hdev);
2090
8e87d142 2091 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2092 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
2093
2094 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 2095 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2096
2097done:
1da177e4
LT
2098 hci_req_unlock(hdev);
2099 hci_dev_put(hdev);
2100 return ret;
2101}
2102
2103int hci_dev_reset_stat(__u16 dev)
2104{
2105 struct hci_dev *hdev;
2106 int ret = 0;
2107
70f23020
AE
2108 hdev = hci_dev_get(dev);
2109 if (!hdev)
1da177e4
LT
2110 return -ENODEV;
2111
0736cfa8
MH
2112 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2113 ret = -EBUSY;
2114 goto done;
2115 }
2116
1da177e4
LT
2117 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2118
0736cfa8 2119done:
1da177e4 2120 hci_dev_put(hdev);
1da177e4
LT
2121 return ret;
2122}
2123
2124int hci_dev_cmd(unsigned int cmd, void __user *arg)
2125{
2126 struct hci_dev *hdev;
2127 struct hci_dev_req dr;
2128 int err = 0;
2129
2130 if (copy_from_user(&dr, arg, sizeof(dr)))
2131 return -EFAULT;
2132
70f23020
AE
2133 hdev = hci_dev_get(dr.dev_id);
2134 if (!hdev)
1da177e4
LT
2135 return -ENODEV;
2136
0736cfa8
MH
2137 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2138 err = -EBUSY;
2139 goto done;
2140 }
2141
5b69bef5
MH
2142 if (hdev->dev_type != HCI_BREDR) {
2143 err = -EOPNOTSUPP;
2144 goto done;
2145 }
2146
56f87901
JH
2147 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2148 err = -EOPNOTSUPP;
2149 goto done;
2150 }
2151
1da177e4
LT
2152 switch (cmd) {
2153 case HCISETAUTH:
01178cd4
JH
2154 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2155 HCI_INIT_TIMEOUT);
1da177e4
LT
2156 break;
2157
2158 case HCISETENCRYPT:
2159 if (!lmp_encrypt_capable(hdev)) {
2160 err = -EOPNOTSUPP;
2161 break;
2162 }
2163
2164 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2165 /* Auth must be enabled first */
01178cd4
JH
2166 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2167 HCI_INIT_TIMEOUT);
1da177e4
LT
2168 if (err)
2169 break;
2170 }
2171
01178cd4
JH
2172 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2173 HCI_INIT_TIMEOUT);
1da177e4
LT
2174 break;
2175
2176 case HCISETSCAN:
01178cd4
JH
2177 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2178 HCI_INIT_TIMEOUT);
1da177e4
LT
2179 break;
2180
1da177e4 2181 case HCISETLINKPOL:
01178cd4
JH
2182 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2183 HCI_INIT_TIMEOUT);
1da177e4
LT
2184 break;
2185
2186 case HCISETLINKMODE:
e4e8e37c
MH
2187 hdev->link_mode = ((__u16) dr.dev_opt) &
2188 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2189 break;
2190
2191 case HCISETPTYPE:
2192 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2193 break;
2194
2195 case HCISETACLMTU:
e4e8e37c
MH
2196 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2197 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2198 break;
2199
2200 case HCISETSCOMTU:
e4e8e37c
MH
2201 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2202 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2203 break;
2204
2205 default:
2206 err = -EINVAL;
2207 break;
2208 }
e4e8e37c 2209
0736cfa8 2210done:
1da177e4
LT
2211 hci_dev_put(hdev);
2212 return err;
2213}
2214
2215int hci_get_dev_list(void __user *arg)
2216{
8035ded4 2217 struct hci_dev *hdev;
1da177e4
LT
2218 struct hci_dev_list_req *dl;
2219 struct hci_dev_req *dr;
1da177e4
LT
2220 int n = 0, size, err;
2221 __u16 dev_num;
2222
2223 if (get_user(dev_num, (__u16 __user *) arg))
2224 return -EFAULT;
2225
2226 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2227 return -EINVAL;
2228
2229 size = sizeof(*dl) + dev_num * sizeof(*dr);
2230
70f23020
AE
2231 dl = kzalloc(size, GFP_KERNEL);
2232 if (!dl)
1da177e4
LT
2233 return -ENOMEM;
2234
2235 dr = dl->dev_req;
2236
f20d09d5 2237 read_lock(&hci_dev_list_lock);
8035ded4 2238 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2239 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2240 cancel_delayed_work(&hdev->power_off);
c542a06c 2241
a8b2d5c2
JH
2242 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2243 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2244
1da177e4
LT
2245 (dr + n)->dev_id = hdev->id;
2246 (dr + n)->dev_opt = hdev->flags;
c542a06c 2247
1da177e4
LT
2248 if (++n >= dev_num)
2249 break;
2250 }
f20d09d5 2251 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2252
2253 dl->dev_num = n;
2254 size = sizeof(*dl) + n * sizeof(*dr);
2255
2256 err = copy_to_user(arg, dl, size);
2257 kfree(dl);
2258
2259 return err ? -EFAULT : 0;
2260}
2261
2262int hci_get_dev_info(void __user *arg)
2263{
2264 struct hci_dev *hdev;
2265 struct hci_dev_info di;
2266 int err = 0;
2267
2268 if (copy_from_user(&di, arg, sizeof(di)))
2269 return -EFAULT;
2270
70f23020
AE
2271 hdev = hci_dev_get(di.dev_id);
2272 if (!hdev)
1da177e4
LT
2273 return -ENODEV;
2274
a8b2d5c2 2275 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2276 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2277
a8b2d5c2
JH
2278 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2279 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2280
1da177e4
LT
2281 strcpy(di.name, hdev->name);
2282 di.bdaddr = hdev->bdaddr;
60f2a3ed 2283 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2284 di.flags = hdev->flags;
2285 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2286 if (lmp_bredr_capable(hdev)) {
2287 di.acl_mtu = hdev->acl_mtu;
2288 di.acl_pkts = hdev->acl_pkts;
2289 di.sco_mtu = hdev->sco_mtu;
2290 di.sco_pkts = hdev->sco_pkts;
2291 } else {
2292 di.acl_mtu = hdev->le_mtu;
2293 di.acl_pkts = hdev->le_pkts;
2294 di.sco_mtu = 0;
2295 di.sco_pkts = 0;
2296 }
1da177e4
LT
2297 di.link_policy = hdev->link_policy;
2298 di.link_mode = hdev->link_mode;
2299
2300 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2301 memcpy(&di.features, &hdev->features, sizeof(di.features));
2302
2303 if (copy_to_user(arg, &di, sizeof(di)))
2304 err = -EFAULT;
2305
2306 hci_dev_put(hdev);
2307
2308 return err;
2309}
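/* A minimal userspace sketch of the ioctl that lands in hci_get_dev_info()
 * above, assuming BlueZ's <bluetooth/hci.h> definitions of
 * struct hci_dev_info and HCIGETDEVINFO.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int print_dev_info(int dev_id)
{
	struct hci_dev_info di;
	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (dd < 0)
		return -1;

	memset(&di, 0, sizeof(di));
	di.dev_id = dev_id;

	if (ioctl(dd, HCIGETDEVINFO, (void *) &di) < 0) {
		close(dd);
		return -1;
	}

	printf("%s: acl_mtu %d acl_pkts %d\n", di.name, di.acl_mtu, di.acl_pkts);
	close(dd);
	return 0;
}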
2310
2311/* ---- Interface to HCI drivers ---- */
2312
611b30f7
MH
2313static int hci_rfkill_set_block(void *data, bool blocked)
2314{
2315 struct hci_dev *hdev = data;
2316
2317 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2318
0736cfa8
MH
2319 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2320 return -EBUSY;
2321
5e130367
JH
2322 if (blocked) {
2323 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2324 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2325 hci_dev_do_close(hdev);
5e130367
JH
2326 } else {
2327 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2328 }
611b30f7
MH
2329
2330 return 0;
2331}
2332
2333static const struct rfkill_ops hci_rfkill_ops = {
2334 .set_block = hci_rfkill_set_block,
2335};
2336
ab81cbf9
JH
2337static void hci_power_on(struct work_struct *work)
2338{
2339 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2340 int err;
ab81cbf9
JH
2341
2342 BT_DBG("%s", hdev->name);
2343
cbed0ca1 2344 err = hci_dev_do_open(hdev);
96570ffc
JH
2345 if (err < 0) {
2346 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2347 return;
96570ffc 2348 }
ab81cbf9 2349
a5c8f270
MH
2350 /* During the HCI setup phase, a few error conditions are
2351 * ignored and they need to be checked now. If they are still
2352 * valid, it is important to turn the device back off.
2353 */
2354 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2355 (hdev->dev_type == HCI_BREDR &&
2356 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2357 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2358 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2359 hci_dev_do_close(hdev);
2360 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2361 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2362 HCI_AUTO_OFF_TIMEOUT);
bf543036 2363 }
ab81cbf9 2364
a8b2d5c2 2365 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 2366 mgmt_index_added(hdev);
ab81cbf9
JH
2367}
2368
2369static void hci_power_off(struct work_struct *work)
2370{
3243553f 2371 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2372 power_off.work);
ab81cbf9
JH
2373
2374 BT_DBG("%s", hdev->name);
2375
8ee56540 2376 hci_dev_do_close(hdev);
ab81cbf9
JH
2377}
2378
16ab91ab
JH
2379static void hci_discov_off(struct work_struct *work)
2380{
2381 struct hci_dev *hdev;
16ab91ab
JH
2382
2383 hdev = container_of(work, struct hci_dev, discov_off.work);
2384
2385 BT_DBG("%s", hdev->name);
2386
d1967ff8 2387 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2388}
2389
2aeb9a1a
JH
2390int hci_uuids_clear(struct hci_dev *hdev)
2391{
4821002c 2392 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2393
4821002c
JH
2394 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2395 list_del(&uuid->list);
2aeb9a1a
JH
2396 kfree(uuid);
2397 }
2398
2399 return 0;
2400}
2401
55ed8ca1
JH
2402int hci_link_keys_clear(struct hci_dev *hdev)
2403{
2404 struct list_head *p, *n;
2405
2406 list_for_each_safe(p, n, &hdev->link_keys) {
2407 struct link_key *key;
2408
2409 key = list_entry(p, struct link_key, list);
2410
2411 list_del(p);
2412 kfree(key);
2413 }
2414
2415 return 0;
2416}
2417
b899efaf
VCG
2418int hci_smp_ltks_clear(struct hci_dev *hdev)
2419{
2420 struct smp_ltk *k, *tmp;
2421
2422 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2423 list_del(&k->list);
2424 kfree(k);
2425 }
2426
2427 return 0;
2428}
2429
55ed8ca1
JH
2430struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431{
8035ded4 2432 struct link_key *k;
55ed8ca1 2433
8035ded4 2434 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2435 if (bacmp(bdaddr, &k->bdaddr) == 0)
2436 return k;
55ed8ca1
JH
2437
2438 return NULL;
2439}
2440
745c0ce3 2441static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2442 u8 key_type, u8 old_key_type)
d25e28ab
JH
2443{
2444 /* Legacy key */
2445 if (key_type < 0x03)
745c0ce3 2446 return true;
d25e28ab
JH
2447
2448 /* Debug keys are insecure so don't store them persistently */
2449 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2450 return false;
d25e28ab
JH
2451
2452 /* Changed combination key and there's no previous one */
2453 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2454 return false;
d25e28ab
JH
2455
2456 /* Security mode 3 case */
2457 if (!conn)
745c0ce3 2458 return true;
d25e28ab
JH
2459
2460 /* Neither the local nor the remote side requested no-bonding */
2461 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2462 return true;
d25e28ab
JH
2463
2464 /* Local side had dedicated bonding as requirement */
2465 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2466 return true;
d25e28ab
JH
2467
2468 /* Remote side had dedicated bonding as requirement */
2469 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2470 return true;
d25e28ab
JH
2471
2472 /* If none of the above criteria match, then don't store the key
2473 * persistently */
745c0ce3 2474 return false;
d25e28ab
JH
2475}
2476
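/* To make the decision tree above concrete, a few sample outcomes,
 * assuming the HCI_LK_* constants from <net/bluetooth/hci.h> and no
 * hci_conn (i.e. the security mode 3 style case):
 *
 *   hci_persistent_key(hdev, NULL, HCI_LK_COMBINATION, 0xff)         -> true  (legacy key)
 *   hci_persistent_key(hdev, NULL, HCI_LK_DEBUG_COMBINATION, 0xff)   -> false (debug key)
 *   hci_persistent_key(hdev, NULL, HCI_LK_CHANGED_COMBINATION, 0xff) -> false (no previous key)
 *   hci_persistent_key(hdev, NULL, HCI_LK_UNAUTH_COMBINATION, 0xff)  -> true  (security mode 3)
 */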
c9839a11 2477struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 2478{
c9839a11 2479 struct smp_ltk *k;
75d262c2 2480
c9839a11
VCG
2481 list_for_each_entry(k, &hdev->long_term_keys, list) {
2482 if (k->ediv != ediv ||
a8c5fb1a 2483 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2484 continue;
2485
c9839a11 2486 return k;
75d262c2
VCG
2487 }
2488
2489 return NULL;
2490}
75d262c2 2491
c9839a11 2492struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 2493 u8 addr_type)
75d262c2 2494{
c9839a11 2495 struct smp_ltk *k;
75d262c2 2496
c9839a11
VCG
2497 list_for_each_entry(k, &hdev->long_term_keys, list)
2498 if (addr_type == k->bdaddr_type &&
a8c5fb1a 2499 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
2500 return k;
2501
2502 return NULL;
2503}
75d262c2 2504
d25e28ab 2505int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2506 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2507{
2508 struct link_key *key, *old_key;
745c0ce3
VA
2509 u8 old_key_type;
2510 bool persistent;
55ed8ca1
JH
2511
2512 old_key = hci_find_link_key(hdev, bdaddr);
2513 if (old_key) {
2514 old_key_type = old_key->type;
2515 key = old_key;
2516 } else {
12adcf3a 2517 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2518 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2519 if (!key)
2520 return -ENOMEM;
2521 list_add(&key->list, &hdev->link_keys);
2522 }
2523
6ed93dc6 2524 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2525
d25e28ab
JH
2526 /* Some buggy controller combinations generate a changed
2527 * combination key for legacy pairing even when there's no
2528 * previous key */
2529 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2530 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2531 type = HCI_LK_COMBINATION;
655fe6ec
JH
2532 if (conn)
2533 conn->key_type = type;
2534 }
d25e28ab 2535
55ed8ca1 2536 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2537 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2538 key->pin_len = pin_len;
2539
b6020ba0 2540 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2541 key->type = old_key_type;
4748fed2
JH
2542 else
2543 key->type = type;
2544
4df378a1
JH
2545 if (!new_key)
2546 return 0;
2547
2548 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2549
744cf19e 2550 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2551
6ec5bcad
VA
2552 if (conn)
2553 conn->flush_key = !persistent;
55ed8ca1
JH
2554
2555 return 0;
2556}
2557
c9839a11 2558int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2559 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2560 ediv, u8 rand[8])
75d262c2 2561{
c9839a11 2562 struct smp_ltk *key, *old_key;
75d262c2 2563
c9839a11
VCG
2564 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2565 return 0;
75d262c2 2566
c9839a11
VCG
2567 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2568 if (old_key)
75d262c2 2569 key = old_key;
c9839a11
VCG
2570 else {
2571 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2572 if (!key)
2573 return -ENOMEM;
c9839a11 2574 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2575 }
2576
75d262c2 2577 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2578 key->bdaddr_type = addr_type;
2579 memcpy(key->val, tk, sizeof(key->val));
2580 key->authenticated = authenticated;
2581 key->ediv = ediv;
2582 key->enc_size = enc_size;
2583 key->type = type;
2584 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2585
c9839a11
VCG
2586 if (!new_key)
2587 return 0;
75d262c2 2588
261cc5aa
VCG
2589 if (type & HCI_SMP_LTK)
2590 mgmt_new_ltk(hdev, key, 1);
2591
75d262c2
VCG
2592 return 0;
2593}
2594
55ed8ca1
JH
2595int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2596{
2597 struct link_key *key;
2598
2599 key = hci_find_link_key(hdev, bdaddr);
2600 if (!key)
2601 return -ENOENT;
2602
6ed93dc6 2603 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2604
2605 list_del(&key->list);
2606 kfree(key);
2607
2608 return 0;
2609}
2610
b899efaf
VCG
2611int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2612{
2613 struct smp_ltk *k, *tmp;
2614
2615 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2616 if (bacmp(bdaddr, &k->bdaddr))
2617 continue;
2618
6ed93dc6 2619 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2620
2621 list_del(&k->list);
2622 kfree(k);
2623 }
2624
2625 return 0;
2626}
2627
6bd32326 2628/* HCI command timer function */
bda4f23a 2629static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2630{
2631 struct hci_dev *hdev = (void *) arg;
2632
bda4f23a
AE
2633 if (hdev->sent_cmd) {
2634 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2635 u16 opcode = __le16_to_cpu(sent->opcode);
2636
2637 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2638 } else {
2639 BT_ERR("%s command tx timeout", hdev->name);
2640 }
2641
6bd32326 2642 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2643 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2644}
2645
2763eda6 2646struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2647 bdaddr_t *bdaddr)
2763eda6
SJ
2648{
2649 struct oob_data *data;
2650
2651 list_for_each_entry(data, &hdev->remote_oob_data, list)
2652 if (bacmp(bdaddr, &data->bdaddr) == 0)
2653 return data;
2654
2655 return NULL;
2656}
2657
2658int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2659{
2660 struct oob_data *data;
2661
2662 data = hci_find_remote_oob_data(hdev, bdaddr);
2663 if (!data)
2664 return -ENOENT;
2665
6ed93dc6 2666 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2667
2668 list_del(&data->list);
2669 kfree(data);
2670
2671 return 0;
2672}
2673
2674int hci_remote_oob_data_clear(struct hci_dev *hdev)
2675{
2676 struct oob_data *data, *n;
2677
2678 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679 list_del(&data->list);
2680 kfree(data);
2681 }
2682
2683 return 0;
2684}
2685
2686int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2687 u8 *randomizer)
2763eda6
SJ
2688{
2689 struct oob_data *data;
2690
2691 data = hci_find_remote_oob_data(hdev, bdaddr);
2692
2693 if (!data) {
2694 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2695 if (!data)
2696 return -ENOMEM;
2697
2698 bacpy(&data->bdaddr, bdaddr);
2699 list_add(&data->list, &hdev->remote_oob_data);
2700 }
2701
2702 memcpy(data->hash, hash, sizeof(data->hash));
2703 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2704
6ed93dc6 2705 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2706
2707 return 0;
2708}
2709
b9ee0a78
MH
2710struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2711 bdaddr_t *bdaddr, u8 type)
b2a66aad 2712{
8035ded4 2713 struct bdaddr_list *b;
b2a66aad 2714
b9ee0a78
MH
2715 list_for_each_entry(b, &hdev->blacklist, list) {
2716 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2717 return b;
b9ee0a78 2718 }
b2a66aad
AJ
2719
2720 return NULL;
2721}
2722
2723int hci_blacklist_clear(struct hci_dev *hdev)
2724{
2725 struct list_head *p, *n;
2726
2727 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2728 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2729
2730 list_del(p);
2731 kfree(b);
2732 }
2733
2734 return 0;
2735}
2736
88c1fe4b 2737int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2738{
2739 struct bdaddr_list *entry;
b2a66aad 2740
b9ee0a78 2741 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2742 return -EBADF;
2743
b9ee0a78 2744 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2745 return -EEXIST;
b2a66aad
AJ
2746
2747 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2748 if (!entry)
2749 return -ENOMEM;
b2a66aad
AJ
2750
2751 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2752 entry->bdaddr_type = type;
b2a66aad
AJ
2753
2754 list_add(&entry->list, &hdev->blacklist);
2755
88c1fe4b 2756 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2757}
2758
88c1fe4b 2759int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2760{
2761 struct bdaddr_list *entry;
b2a66aad 2762
b9ee0a78 2763 if (!bacmp(bdaddr, BDADDR_ANY))
5e762444 2764 return hci_blacklist_clear(hdev);
b2a66aad 2765
b9ee0a78 2766 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2767 if (!entry)
5e762444 2768 return -ENOENT;
b2a66aad
AJ
2769
2770 list_del(&entry->list);
2771 kfree(entry);
2772
88c1fe4b 2773 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2774}
2775
4c87eaab 2776static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2777{
4c87eaab
AG
2778 if (status) {
2779 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2780
4c87eaab
AG
2781 hci_dev_lock(hdev);
2782 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2783 hci_dev_unlock(hdev);
2784 return;
2785 }
7ba8b4be
AG
2786}
2787
4c87eaab 2788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2789{
4c87eaab
AG
2790 /* General inquiry access code (GIAC) */
2791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2792 struct hci_request req;
2793 struct hci_cp_inquiry cp;
7ba8b4be
AG
2794 int err;
2795
4c87eaab
AG
2796 if (status) {
2797 BT_ERR("Failed to disable LE scanning: status %d", status);
2798 return;
2799 }
7ba8b4be 2800
4c87eaab
AG
2801 switch (hdev->discovery.type) {
2802 case DISCOV_TYPE_LE:
2803 hci_dev_lock(hdev);
2804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2805 hci_dev_unlock(hdev);
2806 break;
7ba8b4be 2807
4c87eaab
AG
2808 case DISCOV_TYPE_INTERLEAVED:
2809 hci_req_init(&req, hdev);
7ba8b4be 2810
4c87eaab
AG
2811 memset(&cp, 0, sizeof(cp));
2812 memcpy(&cp.lap, lap, sizeof(cp.lap));
2813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2815
4c87eaab 2816 hci_dev_lock(hdev);
7dbfac1d 2817
4c87eaab 2818 hci_inquiry_cache_flush(hdev);
7dbfac1d 2819
4c87eaab
AG
2820 err = hci_req_run(&req, inquiry_complete);
2821 if (err) {
2822 BT_ERR("Inquiry request failed: err %d", err);
2823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2824 }
7dbfac1d 2825
4c87eaab
AG
2826 hci_dev_unlock(hdev);
2827 break;
7dbfac1d 2828 }
7dbfac1d
AG
2829}
2830
7ba8b4be
AG
2831static void le_scan_disable_work(struct work_struct *work)
2832{
2833 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2834 le_scan_disable.work);
7ba8b4be 2835 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2836 struct hci_request req;
2837 int err;
7ba8b4be
AG
2838
2839 BT_DBG("%s", hdev->name);
2840
4c87eaab 2841 hci_req_init(&req, hdev);
28b75a89 2842
7ba8b4be 2843 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2844 cp.enable = LE_SCAN_DISABLE;
2845 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2846
4c87eaab
AG
2847 err = hci_req_run(&req, le_scan_disable_work_complete);
2848 if (err)
2849 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2850}
2851
9be0dab7
DH
2852/* Alloc HCI device */
2853struct hci_dev *hci_alloc_dev(void)
2854{
2855 struct hci_dev *hdev;
2856
2857 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2858 if (!hdev)
2859 return NULL;
2860
b1b813d4
DH
2861 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2862 hdev->esco_type = (ESCO_HV1);
2863 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
2864 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
2865 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
2866 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2867 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 2868
b1b813d4
DH
2869 hdev->sniff_max_interval = 800;
2870 hdev->sniff_min_interval = 80;
2871
bef64738
MH
2872 hdev->le_scan_interval = 0x0060;
2873 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
2874 hdev->le_conn_min_interval = 0x0028;
2875 hdev->le_conn_max_interval = 0x0038;
bef64738 2876
b1b813d4
DH
2877 mutex_init(&hdev->lock);
2878 mutex_init(&hdev->req_lock);
2879
2880 INIT_LIST_HEAD(&hdev->mgmt_pending);
2881 INIT_LIST_HEAD(&hdev->blacklist);
2882 INIT_LIST_HEAD(&hdev->uuids);
2883 INIT_LIST_HEAD(&hdev->link_keys);
2884 INIT_LIST_HEAD(&hdev->long_term_keys);
2885 INIT_LIST_HEAD(&hdev->remote_oob_data);
6b536b5e 2886 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
2887
2888 INIT_WORK(&hdev->rx_work, hci_rx_work);
2889 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2890 INIT_WORK(&hdev->tx_work, hci_tx_work);
2891 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 2892
b1b813d4
DH
2893 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2894 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2895 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2896
b1b813d4
DH
2897 skb_queue_head_init(&hdev->rx_q);
2898 skb_queue_head_init(&hdev->cmd_q);
2899 skb_queue_head_init(&hdev->raw_q);
2900
2901 init_waitqueue_head(&hdev->req_wait_q);
2902
bda4f23a 2903 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 2904
b1b813d4
DH
2905 hci_init_sysfs(hdev);
2906 discovery_init(hdev);
9be0dab7
DH
2907
2908 return hdev;
2909}
2910EXPORT_SYMBOL(hci_alloc_dev);
2911
2912/* Free HCI device */
2913void hci_free_dev(struct hci_dev *hdev)
2914{
9be0dab7
DH
2915 /* will free via device release */
2916 put_device(&hdev->dev);
2917}
2918EXPORT_SYMBOL(hci_free_dev);
2919
1da177e4
LT
2920/* Register HCI device */
2921int hci_register_dev(struct hci_dev *hdev)
2922{
b1b813d4 2923 int id, error;
1da177e4 2924
010666a1 2925 if (!hdev->open || !hdev->close)
1da177e4
LT
2926 return -EINVAL;
2927
08add513
MM
2928 /* Do not allow HCI_AMP devices to register at index 0,
2929 * so the index can be used as the AMP controller ID.
2930 */
3df92b31
SL
2931 switch (hdev->dev_type) {
2932 case HCI_BREDR:
2933 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2934 break;
2935 case HCI_AMP:
2936 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2937 break;
2938 default:
2939 return -EINVAL;
1da177e4 2940 }
8e87d142 2941
3df92b31
SL
2942 if (id < 0)
2943 return id;
2944
1da177e4
LT
2945 sprintf(hdev->name, "hci%d", id);
2946 hdev->id = id;
2d8b3a11
AE
2947
2948 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2949
d8537548
KC
2950 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2951 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2952 if (!hdev->workqueue) {
2953 error = -ENOMEM;
2954 goto err;
2955 }
f48fd9c8 2956
d8537548
KC
2957 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2958 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2959 if (!hdev->req_workqueue) {
2960 destroy_workqueue(hdev->workqueue);
2961 error = -ENOMEM;
2962 goto err;
2963 }
2964
0153e2ec
MH
2965 if (!IS_ERR_OR_NULL(bt_debugfs))
2966 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2967
bdc3e0f1
MH
2968 dev_set_name(&hdev->dev, "%s", hdev->name);
2969
2970 error = device_add(&hdev->dev);
33ca954d
DH
2971 if (error < 0)
2972 goto err_wqueue;
1da177e4 2973
611b30f7 2974 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2975 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2976 hdev);
611b30f7
MH
2977 if (hdev->rfkill) {
2978 if (rfkill_register(hdev->rfkill) < 0) {
2979 rfkill_destroy(hdev->rfkill);
2980 hdev->rfkill = NULL;
2981 }
2982 }
2983
5e130367
JH
2984 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2985 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2986
a8b2d5c2 2987 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 2988 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 2989
01cd3404 2990 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
2991 /* Assume BR/EDR support until proven otherwise (such as
2992 * through reading supported features during init).
2993 */
2994 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2995 }
ce2be9ac 2996
fcee3377
GP
2997 write_lock(&hci_dev_list_lock);
2998 list_add(&hdev->list, &hci_dev_list);
2999 write_unlock(&hci_dev_list_lock);
3000
1da177e4 3001 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3002 hci_dev_hold(hdev);
1da177e4 3003
19202573 3004 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3005
1da177e4 3006 return id;
f48fd9c8 3007
33ca954d
DH
3008err_wqueue:
3009 destroy_workqueue(hdev->workqueue);
6ead1bbc 3010 destroy_workqueue(hdev->req_workqueue);
33ca954d 3011err:
3df92b31 3012 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3013
33ca954d 3014 return error;
1da177e4
LT
3015}
3016EXPORT_SYMBOL(hci_register_dev);
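/* A sketch of the driver-side contract for the allocation and registration
 * API above, loosely modelled on real transport drivers such as btusb.
 * struct my_transport and the my_open/my_close/my_flush/my_send callbacks
 * are hypothetical placeholders.
 */
struct my_transport;
static int my_open(struct hci_dev *hdev);
static int my_close(struct hci_dev *hdev);
static int my_flush(struct hci_dev *hdev);
static int my_send(struct hci_dev *hdev, struct sk_buff *skb);

static int my_driver_probe(struct my_transport *trans)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hci_set_drvdata(hdev, trans);

	/* open and close are mandatory; hci_register_dev() rejects a
	 * device without them (see the check above). send is needed to
	 * actually transmit frames.
	 */
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->flush = my_flush;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}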
3017
3018/* Unregister HCI device */
59735631 3019void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3020{
3df92b31 3021 int i, id;
ef222013 3022
c13854ce 3023 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3024
94324962
JH
3025 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3026
3df92b31
SL
3027 id = hdev->id;
3028
f20d09d5 3029 write_lock(&hci_dev_list_lock);
1da177e4 3030 list_del(&hdev->list);
f20d09d5 3031 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3032
3033 hci_dev_do_close(hdev);
3034
cd4c5391 3035 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3036 kfree_skb(hdev->reassembly[i]);
3037
b9b5ef18
GP
3038 cancel_work_sync(&hdev->power_on);
3039
ab81cbf9 3040 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3041 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3042 hci_dev_lock(hdev);
744cf19e 3043 mgmt_index_removed(hdev);
09fd0de5 3044 hci_dev_unlock(hdev);
56e5cb86 3045 }
ab81cbf9 3046
2e58ef3e
JH
3047 /* mgmt_index_removed should take care of emptying the
3048 * pending list */
3049 BUG_ON(!list_empty(&hdev->mgmt_pending));
3050
1da177e4
LT
3051 hci_notify(hdev, HCI_DEV_UNREG);
3052
611b30f7
MH
3053 if (hdev->rfkill) {
3054 rfkill_unregister(hdev->rfkill);
3055 rfkill_destroy(hdev->rfkill);
3056 }
3057
bdc3e0f1 3058 device_del(&hdev->dev);
147e2d59 3059
0153e2ec
MH
3060 debugfs_remove_recursive(hdev->debugfs);
3061
f48fd9c8 3062 destroy_workqueue(hdev->workqueue);
6ead1bbc 3063 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3064
09fd0de5 3065 hci_dev_lock(hdev);
e2e0cacb 3066 hci_blacklist_clear(hdev);
2aeb9a1a 3067 hci_uuids_clear(hdev);
55ed8ca1 3068 hci_link_keys_clear(hdev);
b899efaf 3069 hci_smp_ltks_clear(hdev);
2763eda6 3070 hci_remote_oob_data_clear(hdev);
09fd0de5 3071 hci_dev_unlock(hdev);
e2e0cacb 3072
dc946bd8 3073 hci_dev_put(hdev);
3df92b31
SL
3074
3075 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3076}
3077EXPORT_SYMBOL(hci_unregister_dev);
3078
3079/* Suspend HCI device */
3080int hci_suspend_dev(struct hci_dev *hdev)
3081{
3082 hci_notify(hdev, HCI_DEV_SUSPEND);
3083 return 0;
3084}
3085EXPORT_SYMBOL(hci_suspend_dev);
3086
3087/* Resume HCI device */
3088int hci_resume_dev(struct hci_dev *hdev)
3089{
3090 hci_notify(hdev, HCI_DEV_RESUME);
3091 return 0;
3092}
3093EXPORT_SYMBOL(hci_resume_dev);
3094
76bca880 3095/* Receive frame from HCI drivers */
e1a26170 3096int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3097{
76bca880 3098 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3099 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3100 kfree_skb(skb);
3101 return -ENXIO;
3102 }
3103
d82603c6 3104 /* Incoming skb */
76bca880
MH
3105 bt_cb(skb)->incoming = 1;
3106
3107 /* Time stamp */
3108 __net_timestamp(skb);
3109
76bca880 3110 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3111 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3112
76bca880
MH
3113 return 0;
3114}
3115EXPORT_SYMBOL(hci_recv_frame);
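/* A sketch of the receive path from a driver's point of view: wrap the raw
 * bytes of one complete packet in an skb, tag its packet type and hand it
 * to hci_recv_frame(). The buffer is assumed to hold exactly one HCI event
 * here; my_deliver_event is a hypothetical helper.
 */
static int my_deliver_event(struct hci_dev *hdev, const void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	/* hci_recv_frame() consumes the skb, even on error */
	return hci_recv_frame(hdev, skb);
}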
3116
33e882a5 3117static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3118 int count, __u8 index)
33e882a5
SS
3119{
3120 int len = 0;
3121 int hlen = 0;
3122 int remain = count;
3123 struct sk_buff *skb;
3124 struct bt_skb_cb *scb;
3125
3126 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3127 index >= NUM_REASSEMBLY)
33e882a5
SS
3128 return -EILSEQ;
3129
3130 skb = hdev->reassembly[index];
3131
3132 if (!skb) {
3133 switch (type) {
3134 case HCI_ACLDATA_PKT:
3135 len = HCI_MAX_FRAME_SIZE;
3136 hlen = HCI_ACL_HDR_SIZE;
3137 break;
3138 case HCI_EVENT_PKT:
3139 len = HCI_MAX_EVENT_SIZE;
3140 hlen = HCI_EVENT_HDR_SIZE;
3141 break;
3142 case HCI_SCODATA_PKT:
3143 len = HCI_MAX_SCO_SIZE;
3144 hlen = HCI_SCO_HDR_SIZE;
3145 break;
3146 }
3147
1e429f38 3148 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3149 if (!skb)
3150 return -ENOMEM;
3151
3152 scb = (void *) skb->cb;
3153 scb->expect = hlen;
3154 scb->pkt_type = type;
3155
33e882a5
SS
3156 hdev->reassembly[index] = skb;
3157 }
3158
3159 while (count) {
3160 scb = (void *) skb->cb;
89bb46d0 3161 len = min_t(uint, scb->expect, count);
33e882a5
SS
3162
3163 memcpy(skb_put(skb, len), data, len);
3164
3165 count -= len;
3166 data += len;
3167 scb->expect -= len;
3168 remain = count;
3169
3170 switch (type) {
3171 case HCI_EVENT_PKT:
3172 if (skb->len == HCI_EVENT_HDR_SIZE) {
3173 struct hci_event_hdr *h = hci_event_hdr(skb);
3174 scb->expect = h->plen;
3175
3176 if (skb_tailroom(skb) < scb->expect) {
3177 kfree_skb(skb);
3178 hdev->reassembly[index] = NULL;
3179 return -ENOMEM;
3180 }
3181 }
3182 break;
3183
3184 case HCI_ACLDATA_PKT:
3185 if (skb->len == HCI_ACL_HDR_SIZE) {
3186 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3187 scb->expect = __le16_to_cpu(h->dlen);
3188
3189 if (skb_tailroom(skb) < scb->expect) {
3190 kfree_skb(skb);
3191 hdev->reassembly[index] = NULL;
3192 return -ENOMEM;
3193 }
3194 }
3195 break;
3196
3197 case HCI_SCODATA_PKT:
3198 if (skb->len == HCI_SCO_HDR_SIZE) {
3199 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3200 scb->expect = h->dlen;
3201
3202 if (skb_tailroom(skb) < scb->expect) {
3203 kfree_skb(skb);
3204 hdev->reassembly[index] = NULL;
3205 return -ENOMEM;
3206 }
3207 }
3208 break;
3209 }
3210
3211 if (scb->expect == 0) {
3212 /* Complete frame */
3213
3214 bt_cb(skb)->pkt_type = type;
e1a26170 3215 hci_recv_frame(hdev, skb);
33e882a5
SS
3216
3217 hdev->reassembly[index] = NULL;
3218 return remain;
3219 }
3220 }
3221
3222 return remain;
3223}
3224
ef222013
MH
3225int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3226{
f39a3c06
SS
3227 int rem = 0;
3228
ef222013
MH
3229 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3230 return -EILSEQ;
3231
da5f6c37 3232 while (count) {
1e429f38 3233 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3234 if (rem < 0)
3235 return rem;
ef222013 3236
f39a3c06
SS
3237 data += (count - rem);
3238 count = rem;
f81c6224 3239 }
ef222013 3240
f39a3c06 3241 return rem;
ef222013
MH
3242}
3243EXPORT_SYMBOL(hci_recv_fragment);
3244
99811510
SS
3245#define STREAM_REASSEMBLY 0
3246
3247int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3248{
3249 int type;
3250 int rem = 0;
3251
da5f6c37 3252 while (count) {
99811510
SS
3253 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3254
3255 if (!skb) {
3256 struct { char type; } *pkt;
3257
3258 /* Start of the frame */
3259 pkt = data;
3260 type = pkt->type;
3261
3262 data++;
3263 count--;
3264 } else
3265 type = bt_cb(skb)->pkt_type;
3266
1e429f38 3267 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3268 STREAM_REASSEMBLY);
99811510
SS
3269 if (rem < 0)
3270 return rem;
3271
3272 data += (count - rem);
3273 count = rem;
f81c6224 3274 }
99811510
SS
3275
3276 return rem;
3277}
3278EXPORT_SYMBOL(hci_recv_stream_fragment);
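/* A sketch of how a UART-style driver might feed an H:4 byte stream (a
 * packet-type byte followed by the packet itself) into the stream
 * reassembler above; buf and count come from a hypothetical receive
 * interrupt handler.
 */
static void my_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, count);

	if (err < 0)
		BT_ERR("Stream reassembly failed: %d", err);
}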
3279
1da177e4
LT
3280/* ---- Interface to upper protocols ---- */
3281
1da177e4
LT
3282int hci_register_cb(struct hci_cb *cb)
3283{
3284 BT_DBG("%p name %s", cb, cb->name);
3285
f20d09d5 3286 write_lock(&hci_cb_list_lock);
1da177e4 3287 list_add(&cb->list, &hci_cb_list);
f20d09d5 3288 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3289
3290 return 0;
3291}
3292EXPORT_SYMBOL(hci_register_cb);
3293
3294int hci_unregister_cb(struct hci_cb *cb)
3295{
3296 BT_DBG("%p name %s", cb, cb->name);
3297
f20d09d5 3298 write_lock(&hci_cb_list_lock);
1da177e4 3299 list_del(&cb->list);
f20d09d5 3300 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3301
3302 return 0;
3303}
3304EXPORT_SYMBOL(hci_unregister_cb);
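/* A sketch of an upper-protocol callback registration, the pattern used by
 * L2CAP and SCO. The security_cfm hook is assumed from struct hci_cb as
 * defined in hci_core.h for this kernel version; my_security_cfm and the
 * "my_proto" name are hypothetical.
 */
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
}

static struct hci_cb my_cb = {
	.name		= "my_proto",
	.security_cfm	= my_security_cfm,
};

/* At init: hci_register_cb(&my_cb); at exit: hci_unregister_cb(&my_cb); */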
3305
51086991 3306static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3307{
0d48d939 3308 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3309
cd82e61c
MH
3310 /* Time stamp */
3311 __net_timestamp(skb);
1da177e4 3312
cd82e61c
MH
3313 /* Send copy to monitor */
3314 hci_send_to_monitor(hdev, skb);
3315
3316 if (atomic_read(&hdev->promisc)) {
3317 /* Send copy to the sockets */
470fe1b5 3318 hci_send_to_sock(hdev, skb);
1da177e4
LT
3319 }
3320
3321 /* Get rid of skb owner, prior to sending to the driver. */
3322 skb_orphan(skb);
3323
7bd8f09f 3324 if (hdev->send(hdev, skb) < 0)
51086991 3325 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
3326}
3327
3119ae95
JH
3328void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3329{
3330 skb_queue_head_init(&req->cmd_q);
3331 req->hdev = hdev;
5d73e034 3332 req->err = 0;
3119ae95
JH
3333}
3334
3335int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3336{
3337 struct hci_dev *hdev = req->hdev;
3338 struct sk_buff *skb;
3339 unsigned long flags;
3340
3341 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3342
5d73e034
AG
3343 /* If an error occurred during request building, remove all HCI
3344 * commands queued on the HCI request queue.
3345 */
3346 if (req->err) {
3347 skb_queue_purge(&req->cmd_q);
3348 return req->err;
3349 }
3350
3119ae95
JH
3351 /* Do not allow empty requests */
3352 if (skb_queue_empty(&req->cmd_q))
382b0c39 3353 return -ENODATA;
3119ae95
JH
3354
3355 skb = skb_peek_tail(&req->cmd_q);
3356 bt_cb(skb)->req.complete = complete;
3357
3358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3359 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3360 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3361
3362 queue_work(hdev->workqueue, &hdev->cmd_work);
3363
3364 return 0;
3365}
3366
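/* A minimal sketch of the request API: batch a command with hci_req_add()
 * (defined below) and run it via hci_req_run() above. my_req_complete is a
 * hypothetical callback; le_scan_disable_work() earlier in this file uses
 * the same pattern.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}

static int my_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, my_req_complete);
}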
1ca3a9d0 3367static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3368 u32 plen, const void *param)
1da177e4
LT
3369{
3370 int len = HCI_COMMAND_HDR_SIZE + plen;
3371 struct hci_command_hdr *hdr;
3372 struct sk_buff *skb;
3373
1da177e4 3374 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3375 if (!skb)
3376 return NULL;
1da177e4
LT
3377
3378 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3379 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3380 hdr->plen = plen;
3381
3382 if (plen)
3383 memcpy(skb_put(skb, plen), param, plen);
3384
3385 BT_DBG("skb len %d", skb->len);
3386
0d48d939 3387 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3388
1ca3a9d0
JH
3389 return skb;
3390}
3391
3392/* Send HCI command */
07dc93dd
JH
3393int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3394 const void *param)
1ca3a9d0
JH
3395{
3396 struct sk_buff *skb;
3397
3398 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3399
3400 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3401 if (!skb) {
3402 BT_ERR("%s no memory for command", hdev->name);
3403 return -ENOMEM;
3404 }
3405
11714b3d
JH
3406 /* Stand-alone HCI commands must be flagged as
3407 * single-command requests.
3408 */
3409 bt_cb(skb)->req.start = true;
3410
1da177e4 3411 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3412 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3413
3414 return 0;
3415}
1da177e4 3416
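/* A small usage sketch for hci_send_cmd(): queue a Write Scan Enable
 * command making the controller connectable and discoverable. The opcode
 * and scan constants are the ones from <net/bluetooth/hci.h>.
 */
static int my_enable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}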
71c76a17 3417/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3418void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3419 const void *param, u8 event)
71c76a17
JH
3420{
3421 struct hci_dev *hdev = req->hdev;
3422 struct sk_buff *skb;
3423
3424 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3425
34739c1e
AG
3426 /* If an error occurred during request building, there is no point in
3427 * queueing the HCI command. We can simply return.
3428 */
3429 if (req->err)
3430 return;
3431
71c76a17
JH
3432 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3433 if (!skb) {
5d73e034
AG
3434 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3435 hdev->name, opcode);
3436 req->err = -ENOMEM;
e348fe6b 3437 return;
71c76a17
JH
3438 }
3439
3440 if (skb_queue_empty(&req->cmd_q))
3441 bt_cb(skb)->req.start = true;
3442
02350a72
JH
3443 bt_cb(skb)->req.event = event;
3444
71c76a17 3445 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3446}
3447
07dc93dd
JH
3448void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3449 const void *param)
02350a72
JH
3450{
3451 hci_req_add_ev(req, opcode, plen, param, 0);
3452}
3453
1da177e4 3454/* Get data from the previously sent command */
a9de9248 3455void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3456{
3457 struct hci_command_hdr *hdr;
3458
3459 if (!hdev->sent_cmd)
3460 return NULL;
3461
3462 hdr = (void *) hdev->sent_cmd->data;
3463
a9de9248 3464 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3465 return NULL;
3466
f0e09510 3467 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3468
3469 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3470}
3471
3472/* Send ACL data */
3473static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3474{
3475 struct hci_acl_hdr *hdr;
3476 int len = skb->len;
3477
badff6d0
ACM
3478 skb_push(skb, HCI_ACL_HDR_SIZE);
3479 skb_reset_transport_header(skb);
9c70220b 3480 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3481 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3482 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3483}
3484
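/* For reference, hci_handle_pack() (from hci.h) places the 12-bit
 * connection handle in bits 0-11 and the ACL flag fields (packet boundary
 * and broadcast) in bits 12-15, e.g.:
 *
 *   hci_handle_pack(0x002a, ACL_START) == 0x202a   (ACL_START == 0x02)
 */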
ee22be7e 3485static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3486 struct sk_buff *skb, __u16 flags)
1da177e4 3487{
ee22be7e 3488 struct hci_conn *conn = chan->conn;
1da177e4
LT
3489 struct hci_dev *hdev = conn->hdev;
3490 struct sk_buff *list;
3491
087bfd99
GP
3492 skb->len = skb_headlen(skb);
3493 skb->data_len = 0;
3494
3495 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3496
3497 switch (hdev->dev_type) {
3498 case HCI_BREDR:
3499 hci_add_acl_hdr(skb, conn->handle, flags);
3500 break;
3501 case HCI_AMP:
3502 hci_add_acl_hdr(skb, chan->handle, flags);
3503 break;
3504 default:
3505 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3506 return;
3507 }
087bfd99 3508
70f23020
AE
3509 list = skb_shinfo(skb)->frag_list;
3510 if (!list) {
1da177e4
LT
3511 /* Non fragmented */
3512 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3513
73d80deb 3514 skb_queue_tail(queue, skb);
1da177e4
LT
3515 } else {
3516 /* Fragmented */
3517 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3518
3519 skb_shinfo(skb)->frag_list = NULL;
3520
3521 /* Queue all fragments atomically */
af3e6359 3522 spin_lock(&queue->lock);
1da177e4 3523
73d80deb 3524 __skb_queue_tail(queue, skb);
e702112f
AE
3525
3526 flags &= ~ACL_START;
3527 flags |= ACL_CONT;
1da177e4
LT
3528 do {
3529 skb = list; list = list->next;
8e87d142 3530
0d48d939 3531 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3532 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3533
3534 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3535
73d80deb 3536 __skb_queue_tail(queue, skb);
1da177e4
LT
3537 } while (list);
3538
af3e6359 3539 spin_unlock(&queue->lock);
1da177e4 3540 }
73d80deb
LAD
3541}
3542
3543void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3544{
ee22be7e 3545 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3546
f0e09510 3547 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3548
ee22be7e 3549 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3550
3eff45ea 3551 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3552}
1da177e4
LT
3553
3554/* Send SCO data */
0d861d8b 3555void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3556{
3557 struct hci_dev *hdev = conn->hdev;
3558 struct hci_sco_hdr hdr;
3559
3560 BT_DBG("%s len %d", hdev->name, skb->len);
3561
aca3192c 3562 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3563 hdr.dlen = skb->len;
3564
badff6d0
ACM
3565 skb_push(skb, HCI_SCO_HDR_SIZE);
3566 skb_reset_transport_header(skb);
9c70220b 3567 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3568
0d48d939 3569 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3570
1da177e4 3571 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3572 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3573}
1da177e4
LT
3574
3575/* ---- HCI TX task (outgoing data) ---- */
3576
3577/* HCI Connection scheduler */
6039aa73
GP
3578static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3579 int *quote)
1da177e4
LT
3580{
3581 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3582 struct hci_conn *conn = NULL, *c;
abc5de8f 3583 unsigned int num = 0, min = ~0;
1da177e4 3584
8e87d142 3585 /* We don't have to lock device here. Connections are always
1da177e4 3586 * added and removed with TX task disabled. */
bf4c6325
GP
3587
3588 rcu_read_lock();
3589
3590 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3591 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3592 continue;
769be974
MH
3593
3594 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3595 continue;
3596
1da177e4
LT
3597 num++;
3598
3599 if (c->sent < min) {
3600 min = c->sent;
3601 conn = c;
3602 }
52087a79
LAD
3603
3604 if (hci_conn_num(hdev, type) == num)
3605 break;
1da177e4
LT
3606 }
3607
bf4c6325
GP
3608 rcu_read_unlock();
3609
1da177e4 3610 if (conn) {
6ed58ec5
VT
3611 int cnt, q;
3612
3613 switch (conn->type) {
3614 case ACL_LINK:
3615 cnt = hdev->acl_cnt;
3616 break;
3617 case SCO_LINK:
3618 case ESCO_LINK:
3619 cnt = hdev->sco_cnt;
3620 break;
3621 case LE_LINK:
3622 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3623 break;
3624 default:
3625 cnt = 0;
3626 BT_ERR("Unknown link type");
3627 }
3628
3629 q = cnt / num;
1da177e4
LT
3630 *quote = q ? q : 1;
3631 } else
3632 *quote = 0;
3633
3634 BT_DBG("conn %p quote %d", conn, *quote);
3635 return conn;
3636}
3637
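/* A worked example for the quota computed above: with hdev->acl_cnt == 8
 * and three ACL connections holding queued data, the least-busy connection
 * is picked and quote = 8 / 3 = 2, so at most two frames are sent to it
 * before the scheduler moves on.
 */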
6039aa73 3638static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3639{
3640 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3641 struct hci_conn *c;
1da177e4 3642
bae1f5d9 3643 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3644
bf4c6325
GP
3645 rcu_read_lock();
3646
1da177e4 3647 /* Kill stalled connections */
bf4c6325 3648 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3649 if (c->type == type && c->sent) {
6ed93dc6
AE
3650 BT_ERR("%s killing stalled connection %pMR",
3651 hdev->name, &c->dst);
bed71748 3652 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3653 }
3654 }
bf4c6325
GP
3655
3656 rcu_read_unlock();
1da177e4
LT
3657}
3658
6039aa73
GP
3659static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3660 int *quote)
1da177e4 3661{
73d80deb
LAD
3662 struct hci_conn_hash *h = &hdev->conn_hash;
3663 struct hci_chan *chan = NULL;
abc5de8f 3664 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3665 struct hci_conn *conn;
73d80deb
LAD
3666 int cnt, q, conn_num = 0;
3667
3668 BT_DBG("%s", hdev->name);
3669
bf4c6325
GP
3670 rcu_read_lock();
3671
3672 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3673 struct hci_chan *tmp;
3674
3675 if (conn->type != type)
3676 continue;
3677
3678 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3679 continue;
3680
3681 conn_num++;
3682
8192edef 3683 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3684 struct sk_buff *skb;
3685
3686 if (skb_queue_empty(&tmp->data_q))
3687 continue;
3688
3689 skb = skb_peek(&tmp->data_q);
3690 if (skb->priority < cur_prio)
3691 continue;
3692
3693 if (skb->priority > cur_prio) {
3694 num = 0;
3695 min = ~0;
3696 cur_prio = skb->priority;
3697 }
3698
3699 num++;
3700
3701 if (conn->sent < min) {
3702 min = conn->sent;
3703 chan = tmp;
3704 }
3705 }
3706
3707 if (hci_conn_num(hdev, type) == conn_num)
3708 break;
3709 }
3710
bf4c6325
GP
3711 rcu_read_unlock();
3712
73d80deb
LAD
3713 if (!chan)
3714 return NULL;
3715
3716 switch (chan->conn->type) {
3717 case ACL_LINK:
3718 cnt = hdev->acl_cnt;
3719 break;
bd1eb66b
AE
3720 case AMP_LINK:
3721 cnt = hdev->block_cnt;
3722 break;
73d80deb
LAD
3723 case SCO_LINK:
3724 case ESCO_LINK:
3725 cnt = hdev->sco_cnt;
3726 break;
3727 case LE_LINK:
3728 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3729 break;
3730 default:
3731 cnt = 0;
3732 BT_ERR("Unknown link type");
3733 }
3734
3735 q = cnt / num;
3736 *quote = q ? q : 1;
3737 BT_DBG("chan %p quote %d", chan, *quote);
3738 return chan;
3739}
3740
02b20f0b
LAD
3741static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3742{
3743 struct hci_conn_hash *h = &hdev->conn_hash;
3744 struct hci_conn *conn;
3745 int num = 0;
3746
3747 BT_DBG("%s", hdev->name);
3748
bf4c6325
GP
3749 rcu_read_lock();
3750
3751 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3752 struct hci_chan *chan;
3753
3754 if (conn->type != type)
3755 continue;
3756
3757 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3758 continue;
3759
3760 num++;
3761
8192edef 3762 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3763 struct sk_buff *skb;
3764
3765 if (chan->sent) {
3766 chan->sent = 0;
3767 continue;
3768 }
3769
3770 if (skb_queue_empty(&chan->data_q))
3771 continue;
3772
3773 skb = skb_peek(&chan->data_q);
3774 if (skb->priority >= HCI_PRIO_MAX - 1)
3775 continue;
3776
3777 skb->priority = HCI_PRIO_MAX - 1;
3778
3779 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3780 skb->priority);
02b20f0b
LAD
3781 }
3782
3783 if (hci_conn_num(hdev, type) == num)
3784 break;
3785 }
bf4c6325
GP
3786
3787 rcu_read_unlock();
3788
02b20f0b
LAD
3789}
3790
b71d385a
AE
3791static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3792{
3793 /* Calculate count of blocks used by this packet */
3794 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3795}
3796
6039aa73 3797static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3798{
1da177e4
LT
3799 if (!test_bit(HCI_RAW, &hdev->flags)) {
3800 /* ACL tx timeout must be longer than maximum
3801 * link supervision timeout (40.9 seconds) */
63d2bc1b 3802 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3803 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3804 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3805 }
63d2bc1b 3806}
1da177e4 3807
6039aa73 3808static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3809{
3810 unsigned int cnt = hdev->acl_cnt;
3811 struct hci_chan *chan;
3812 struct sk_buff *skb;
3813 int quote;
3814
3815 __check_timeout(hdev, cnt);
04837f64 3816
73d80deb 3817 while (hdev->acl_cnt &&
a8c5fb1a 3818 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3819 u32 priority = (skb_peek(&chan->data_q))->priority;
3820 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3821 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3822 skb->len, skb->priority);
73d80deb 3823
ec1cce24
LAD
3824 /* Stop if priority has changed */
3825 if (skb->priority < priority)
3826 break;
3827
3828 skb = skb_dequeue(&chan->data_q);
3829
73d80deb 3830 hci_conn_enter_active_mode(chan->conn,
04124681 3831 bt_cb(skb)->force_active);
04837f64 3832
57d17d70 3833 hci_send_frame(hdev, skb);
1da177e4
LT
3834 hdev->acl_last_tx = jiffies;
3835
3836 hdev->acl_cnt--;
73d80deb
LAD
3837 chan->sent++;
3838 chan->conn->sent++;
1da177e4
LT
3839 }
3840 }
02b20f0b
LAD
3841
3842 if (cnt != hdev->acl_cnt)
3843 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3844}
3845

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
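
/* Unlike the packet-based scheduler above, both the controller budget
 * (hdev->block_cnt) and the per-round quota are accounted in buffer
 * blocks rather than packets, and the loop returns early if the packet
 * at the head of the queue needs more blocks than the controller has
 * left.
 */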

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
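
/* SCO scheduling is deliberately minimal: there are no channel
 * priorities, hci_low_sent() simply favours the connection with the
 * fewest packets in flight, and the explicit wrap of conn->sent merely
 * keeps the unsigned counter from overflowing on long-lived links.
 */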

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
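
/* Controllers that advertise no dedicated LE buffers (hdev->le_pkts ==
 * 0) share the ACL buffer pool instead, which is why the loop above
 * works on a local copy of the count and writes the remainder back to
 * either le_cnt or acl_cnt when it finishes.
 */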

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
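
/* When a user-channel socket owns the device (HCI_USER_CHANNEL), the
 * in-kernel schedulers are bypassed entirely and only the raw queue is
 * drained, leaving buffer accounting to the user-space owner.
 */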

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
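
/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the packet-boundary and broadcast flag bits;
 * hci_handle() and hci_flags() are the masking helpers used above to
 * split them apart.
 */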

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
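
/* Each request is queued as a run of commands whose first skb has
 * bt_cb(skb)->req.start set. If the head of cmd_q starts a new request
 * (or the queue is empty), every command of the current request has
 * already been sent, hence the request is complete.
 */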

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
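
/* When a command fails mid-request, the flush loop above drops the rest
 * of that request from cmd_q (stopping at the skb that starts the next
 * request) and picks up the completion callback, which is stored on the
 * request's final command, so it can still be invoked with the error
 * status.
 */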

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
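
/* Every received frame is copied to the monitor socket before any
 * filtering takes place, so monitoring tools still see traffic on raw
 * and user-channel devices whose frames the core otherwise discards.
 */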

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
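
/* Command flow control in short: cmd_cnt holds the controller's
 * remaining command credits, a clone of the in-flight command is kept
 * in sent_cmd so its completion event can be matched later, and
 * cmd_timer is a watchdog that fires if no completion arrives within
 * HCI_CMD_TIMEOUT. While the HCI_RESET flag is set the watchdog is
 * deliberately left disarmed.
 */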