1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37
38 #include "smp.h"
39
40 static void hci_rx_work(struct work_struct *work);
41 static void hci_cmd_work(struct work_struct *work);
42 static void hci_tx_work(struct work_struct *work);
43
44 /* HCI device list */
45 LIST_HEAD(hci_dev_list);
46 DEFINE_RWLOCK(hci_dev_list_lock);
47
48 /* HCI callback list */
49 LIST_HEAD(hci_cb_list);
50 DEFINE_RWLOCK(hci_cb_list_lock);
51
52 /* HCI ID Numbering */
53 static DEFINE_IDA(hci_index_ida);
54
55 /* ---- HCI notifications ---- */
56
57 static void hci_notify(struct hci_dev *hdev, int event)
58 {
59 hci_sock_dev_event(hdev, event);
60 }
61
62 /* ---- HCI debugfs entries ---- */
63
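/* The debugfs entries below follow a few recurring patterns: boolean flags
 * are exposed as files that read back 'Y' or 'N' for a bit in
 * hdev->dev_flags and are written via strtobool(), numeric settings use
 * DEFINE_SIMPLE_ATTRIBUTE() getters/setters guarded by hci_dev_lock(), and
 * list-style entries use the seq_file single_open() helpers.
 */
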
64 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
65 size_t count, loff_t *ppos)
66 {
67 struct hci_dev *hdev = file->private_data;
68 char buf[3];
69
70         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
71 buf[1] = '\n';
72 buf[2] = '\0';
73 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
74 }
75
76 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
77 size_t count, loff_t *ppos)
78 {
79 struct hci_dev *hdev = file->private_data;
80 struct sk_buff *skb;
81 char buf[32];
82 size_t buf_size = min(count, (sizeof(buf)-1));
83 bool enable;
84 int err;
85
86 if (!test_bit(HCI_UP, &hdev->flags))
87 return -ENETDOWN;
88
89 if (copy_from_user(buf, user_buf, buf_size))
90 return -EFAULT;
91
92 buf[buf_size] = '\0';
93 if (strtobool(buf, &enable))
94 return -EINVAL;
95
96 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
97 return -EALREADY;
98
99 hci_req_lock(hdev);
100 if (enable)
101 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 else
104 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
105 HCI_CMD_TIMEOUT);
106 hci_req_unlock(hdev);
107
108 if (IS_ERR(skb))
109 return PTR_ERR(skb);
110
111 err = -bt_to_errno(skb->data[0]);
112 kfree_skb(skb);
113
114 if (err < 0)
115 return err;
116
117 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
118
119 return count;
120 }
121
122 static const struct file_operations dut_mode_fops = {
123 .open = simple_open,
124 .read = dut_mode_read,
125 .write = dut_mode_write,
126 .llseek = default_llseek,
127 };
128
129 static int features_show(struct seq_file *f, void *ptr)
130 {
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
150 hci_dev_unlock(hdev);
151
152 return 0;
153 }
154
155 static int features_open(struct inode *inode, struct file *file)
156 {
157 return single_open(file, features_show, inode->i_private);
158 }
159
160 static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165 };
166
167 static int blacklist_show(struct seq_file *f, void *p)
168 {
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
175 hci_dev_unlock(hdev);
176
177 return 0;
178 }
179
180 static int blacklist_open(struct inode *inode, struct file *file)
181 {
182 return single_open(file, blacklist_show, inode->i_private);
183 }
184
185 static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190 };
191
192 static int uuids_show(struct seq_file *f, void *p)
193 {
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
199 u8 i, val[16];
200
201                 /* The Bluetooth UUID values are stored with reversed byte
202                  * order (little endian), so reverse them into the
203                  * big-endian order expected by the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
207
208 seq_printf(f, "%pUb\n", val);
209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213 }
214
215 static int uuids_open(struct inode *inode, struct file *file)
216 {
217 return single_open(file, uuids_show, inode->i_private);
218 }
219
220 static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225 };
226
227 static int inquiry_cache_show(struct seq_file *f, void *p)
228 {
229 struct hci_dev *hdev = f->private;
230 struct discovery_state *cache = &hdev->discovery;
231 struct inquiry_entry *e;
232
233 hci_dev_lock(hdev);
234
235 list_for_each_entry(e, &cache->all, all) {
236 struct inquiry_data *data = &e->data;
237 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
238 &data->bdaddr,
239 data->pscan_rep_mode, data->pscan_period_mode,
240 data->pscan_mode, data->dev_class[2],
241 data->dev_class[1], data->dev_class[0],
242 __le16_to_cpu(data->clock_offset),
243 data->rssi, data->ssp_mode, e->timestamp);
244 }
245
246 hci_dev_unlock(hdev);
247
248 return 0;
249 }
250
251 static int inquiry_cache_open(struct inode *inode, struct file *file)
252 {
253 return single_open(file, inquiry_cache_show, inode->i_private);
254 }
255
256 static const struct file_operations inquiry_cache_fops = {
257 .open = inquiry_cache_open,
258 .read = seq_read,
259 .llseek = seq_lseek,
260 .release = single_release,
261 };
262
263 static int link_keys_show(struct seq_file *f, void *ptr)
264 {
265 struct hci_dev *hdev = f->private;
266 struct list_head *p, *n;
267
268 hci_dev_lock(hdev);
269 list_for_each_safe(p, n, &hdev->link_keys) {
270 struct link_key *key = list_entry(p, struct link_key, list);
271 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
272 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
273 }
274 hci_dev_unlock(hdev);
275
276 return 0;
277 }
278
279 static int link_keys_open(struct inode *inode, struct file *file)
280 {
281 return single_open(file, link_keys_show, inode->i_private);
282 }
283
284 static const struct file_operations link_keys_fops = {
285 .open = link_keys_open,
286 .read = seq_read,
287 .llseek = seq_lseek,
288 .release = single_release,
289 };
290
291 static int dev_class_show(struct seq_file *f, void *ptr)
292 {
293 struct hci_dev *hdev = f->private;
294
295 hci_dev_lock(hdev);
296 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
297 hdev->dev_class[1], hdev->dev_class[0]);
298 hci_dev_unlock(hdev);
299
300 return 0;
301 }
302
303 static int dev_class_open(struct inode *inode, struct file *file)
304 {
305 return single_open(file, dev_class_show, inode->i_private);
306 }
307
308 static const struct file_operations dev_class_fops = {
309 .open = dev_class_open,
310 .read = seq_read,
311 .llseek = seq_lseek,
312 .release = single_release,
313 };
314
315 static int voice_setting_get(void *data, u64 *val)
316 {
317 struct hci_dev *hdev = data;
318
319 hci_dev_lock(hdev);
320 *val = hdev->voice_setting;
321 hci_dev_unlock(hdev);
322
323 return 0;
324 }
325
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
327 NULL, "0x%4.4llx\n");
328
329 static int auto_accept_delay_set(void *data, u64 val)
330 {
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338 }
339
340 static int auto_accept_delay_get(void *data, u64 *val)
341 {
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349 }
350
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
354 static int ssp_debug_mode_set(void *data, u64 val)
355 {
356 struct hci_dev *hdev = data;
357 struct sk_buff *skb;
358 __u8 mode;
359 int err;
360
361 if (val != 0 && val != 1)
362 return -EINVAL;
363
364 if (!test_bit(HCI_UP, &hdev->flags))
365 return -ENETDOWN;
366
367 hci_req_lock(hdev);
368 mode = val;
369 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
370 &mode, HCI_CMD_TIMEOUT);
371 hci_req_unlock(hdev);
372
373 if (IS_ERR(skb))
374 return PTR_ERR(skb);
375
376 err = -bt_to_errno(skb->data[0]);
377 kfree_skb(skb);
378
379 if (err < 0)
380 return err;
381
382 hci_dev_lock(hdev);
383 hdev->ssp_debug_mode = val;
384 hci_dev_unlock(hdev);
385
386 return 0;
387 }
388
389 static int ssp_debug_mode_get(void *data, u64 *val)
390 {
391 struct hci_dev *hdev = data;
392
393 hci_dev_lock(hdev);
394 *val = hdev->ssp_debug_mode;
395 hci_dev_unlock(hdev);
396
397 return 0;
398 }
399
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
401 ssp_debug_mode_set, "%llu\n");
402
403 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
404 size_t count, loff_t *ppos)
405 {
406 struct hci_dev *hdev = file->private_data;
407 char buf[3];
408
409         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
410 buf[1] = '\n';
411 buf[2] = '\0';
412 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
413 }
414
415 static ssize_t force_sc_support_write(struct file *file,
416 const char __user *user_buf,
417 size_t count, loff_t *ppos)
418 {
419 struct hci_dev *hdev = file->private_data;
420 char buf[32];
421 size_t buf_size = min(count, (sizeof(buf)-1));
422 bool enable;
423
424 if (test_bit(HCI_UP, &hdev->flags))
425 return -EBUSY;
426
427 if (copy_from_user(buf, user_buf, buf_size))
428 return -EFAULT;
429
430 buf[buf_size] = '\0';
431 if (strtobool(buf, &enable))
432 return -EINVAL;
433
434 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
435 return -EALREADY;
436
437 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
438
439 return count;
440 }
441
442 static const struct file_operations force_sc_support_fops = {
443 .open = simple_open,
444 .read = force_sc_support_read,
445 .write = force_sc_support_write,
446 .llseek = default_llseek,
447 };
448
449 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
450 size_t count, loff_t *ppos)
451 {
452 struct hci_dev *hdev = file->private_data;
453 char buf[3];
454
455         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
456 buf[1] = '\n';
457 buf[2] = '\0';
458 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
459 }
460
461 static const struct file_operations sc_only_mode_fops = {
462 .open = simple_open,
463 .read = sc_only_mode_read,
464 .llseek = default_llseek,
465 };
466
467 static int idle_timeout_set(void *data, u64 val)
468 {
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
475 hdev->idle_timeout = val;
476 hci_dev_unlock(hdev);
477
478 return 0;
479 }
480
481 static int idle_timeout_get(void *data, u64 *val)
482 {
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490 }
491
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
495 static int rpa_timeout_set(void *data, u64 val)
496 {
497 struct hci_dev *hdev = data;
498
499 /* Require the RPA timeout to be at least 30 seconds and at most
500 * 24 hours.
501 */
502 if (val < 30 || val > (60 * 60 * 24))
503 return -EINVAL;
504
505 hci_dev_lock(hdev);
506 hdev->rpa_timeout = val;
507 hci_dev_unlock(hdev);
508
509 return 0;
510 }
511
512 static int rpa_timeout_get(void *data, u64 *val)
513 {
514 struct hci_dev *hdev = data;
515
516 hci_dev_lock(hdev);
517 *val = hdev->rpa_timeout;
518 hci_dev_unlock(hdev);
519
520 return 0;
521 }
522
523 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
524 rpa_timeout_set, "%llu\n");
525
526 static int sniff_min_interval_set(void *data, u64 val)
527 {
528 struct hci_dev *hdev = data;
529
530 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
531 return -EINVAL;
532
533 hci_dev_lock(hdev);
534 hdev->sniff_min_interval = val;
535 hci_dev_unlock(hdev);
536
537 return 0;
538 }
539
540 static int sniff_min_interval_get(void *data, u64 *val)
541 {
542 struct hci_dev *hdev = data;
543
544 hci_dev_lock(hdev);
545 *val = hdev->sniff_min_interval;
546 hci_dev_unlock(hdev);
547
548 return 0;
549 }
550
551 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
552 sniff_min_interval_set, "%llu\n");
553
554 static int sniff_max_interval_set(void *data, u64 val)
555 {
556 struct hci_dev *hdev = data;
557
558 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
559 return -EINVAL;
560
561 hci_dev_lock(hdev);
562 hdev->sniff_max_interval = val;
563 hci_dev_unlock(hdev);
564
565 return 0;
566 }
567
568 static int sniff_max_interval_get(void *data, u64 *val)
569 {
570 struct hci_dev *hdev = data;
571
572 hci_dev_lock(hdev);
573 *val = hdev->sniff_max_interval;
574 hci_dev_unlock(hdev);
575
576 return 0;
577 }
578
579 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
580 sniff_max_interval_set, "%llu\n");
581
582 static int identity_show(struct seq_file *f, void *p)
583 {
584 struct hci_dev *hdev = f->private;
585 bdaddr_t *addr;
586 u8 addr_type;
587
588 hci_dev_lock(hdev);
589
590 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
591 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
592 addr = &hdev->static_addr;
593 addr_type = ADDR_LE_DEV_RANDOM;
594 } else {
595 addr = &hdev->bdaddr;
596 addr_type = ADDR_LE_DEV_PUBLIC;
597 }
598
599 seq_printf(f, "%pMR (type %u) %*phN\n", addr, addr_type, 16, hdev->irk);
600
601 hci_dev_unlock(hdev);
602
603 return 0;
604 }
605
606 static int identity_open(struct inode *inode, struct file *file)
607 {
608 return single_open(file, identity_show, inode->i_private);
609 }
610
611 static const struct file_operations identity_fops = {
612 .open = identity_open,
613 .read = seq_read,
614 .llseek = seq_lseek,
615 .release = single_release,
616 };
617
618 static int random_address_show(struct seq_file *f, void *p)
619 {
620 struct hci_dev *hdev = f->private;
621
622 hci_dev_lock(hdev);
623 seq_printf(f, "%pMR\n", &hdev->random_addr);
624 hci_dev_unlock(hdev);
625
626 return 0;
627 }
628
629 static int random_address_open(struct inode *inode, struct file *file)
630 {
631 return single_open(file, random_address_show, inode->i_private);
632 }
633
634 static const struct file_operations random_address_fops = {
635 .open = random_address_open,
636 .read = seq_read,
637 .llseek = seq_lseek,
638 .release = single_release,
639 };
640
641 static int static_address_show(struct seq_file *f, void *p)
642 {
643 struct hci_dev *hdev = f->private;
644
645 hci_dev_lock(hdev);
646 seq_printf(f, "%pMR\n", &hdev->static_addr);
647 hci_dev_unlock(hdev);
648
649 return 0;
650 }
651
652 static int static_address_open(struct inode *inode, struct file *file)
653 {
654 return single_open(file, static_address_show, inode->i_private);
655 }
656
657 static const struct file_operations static_address_fops = {
658 .open = static_address_open,
659 .read = seq_read,
660 .llseek = seq_lseek,
661 .release = single_release,
662 };
663
664 static ssize_t force_static_address_read(struct file *file,
665 char __user *user_buf,
666 size_t count, loff_t *ppos)
667 {
668 struct hci_dev *hdev = file->private_data;
669 char buf[3];
670
671         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
672 buf[1] = '\n';
673 buf[2] = '\0';
674 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
675 }
676
677 static ssize_t force_static_address_write(struct file *file,
678 const char __user *user_buf,
679 size_t count, loff_t *ppos)
680 {
681 struct hci_dev *hdev = file->private_data;
682 char buf[32];
683 size_t buf_size = min(count, (sizeof(buf)-1));
684 bool enable;
685
686 if (test_bit(HCI_UP, &hdev->flags))
687 return -EBUSY;
688
689 if (copy_from_user(buf, user_buf, buf_size))
690 return -EFAULT;
691
692 buf[buf_size] = '\0';
693 if (strtobool(buf, &enable))
694 return -EINVAL;
695
696 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
697 return -EALREADY;
698
699 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
700
701 return count;
702 }
703
704 static const struct file_operations force_static_address_fops = {
705 .open = simple_open,
706 .read = force_static_address_read,
707 .write = force_static_address_write,
708 .llseek = default_llseek,
709 };
710
711 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
712 {
713 struct hci_dev *hdev = f->private;
714 struct list_head *p, *n;
715
716 hci_dev_lock(hdev);
717 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
718 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
719 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
720 &irk->bdaddr, irk->addr_type,
721 16, irk->val, &irk->rpa);
722 }
723 hci_dev_unlock(hdev);
724
725 return 0;
726 }
727
728 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
729 {
730 return single_open(file, identity_resolving_keys_show,
731 inode->i_private);
732 }
733
734 static const struct file_operations identity_resolving_keys_fops = {
735 .open = identity_resolving_keys_open,
736 .read = seq_read,
737 .llseek = seq_lseek,
738 .release = single_release,
739 };
740
741 static int long_term_keys_show(struct seq_file *f, void *ptr)
742 {
743 struct hci_dev *hdev = f->private;
744 struct list_head *p, *n;
745
746 hci_dev_lock(hdev);
747 list_for_each_safe(p, n, &hdev->long_term_keys) {
748 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
749 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
750 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
751 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
752 8, ltk->rand, 16, ltk->val);
753 }
754 hci_dev_unlock(hdev);
755
756 return 0;
757 }
758
759 static int long_term_keys_open(struct inode *inode, struct file *file)
760 {
761 return single_open(file, long_term_keys_show, inode->i_private);
762 }
763
764 static const struct file_operations long_term_keys_fops = {
765 .open = long_term_keys_open,
766 .read = seq_read,
767 .llseek = seq_lseek,
768 .release = single_release,
769 };
770
771 static int conn_min_interval_set(void *data, u64 val)
772 {
773 struct hci_dev *hdev = data;
774
775 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
776 return -EINVAL;
777
778 hci_dev_lock(hdev);
779 hdev->le_conn_min_interval = val;
780 hci_dev_unlock(hdev);
781
782 return 0;
783 }
784
785 static int conn_min_interval_get(void *data, u64 *val)
786 {
787 struct hci_dev *hdev = data;
788
789 hci_dev_lock(hdev);
790 *val = hdev->le_conn_min_interval;
791 hci_dev_unlock(hdev);
792
793 return 0;
794 }
795
796 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
797 conn_min_interval_set, "%llu\n");
798
799 static int conn_max_interval_set(void *data, u64 val)
800 {
801 struct hci_dev *hdev = data;
802
803 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
804 return -EINVAL;
805
806 hci_dev_lock(hdev);
807 hdev->le_conn_max_interval = val;
808 hci_dev_unlock(hdev);
809
810 return 0;
811 }
812
813 static int conn_max_interval_get(void *data, u64 *val)
814 {
815 struct hci_dev *hdev = data;
816
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_max_interval;
819 hci_dev_unlock(hdev);
820
821 return 0;
822 }
823
824 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
825 conn_max_interval_set, "%llu\n");
826
827 static int adv_channel_map_set(void *data, u64 val)
828 {
829 struct hci_dev *hdev = data;
830
831 if (val < 0x01 || val > 0x07)
832 return -EINVAL;
833
834 hci_dev_lock(hdev);
835 hdev->le_adv_channel_map = val;
836 hci_dev_unlock(hdev);
837
838 return 0;
839 }
840
841 static int adv_channel_map_get(void *data, u64 *val)
842 {
843 struct hci_dev *hdev = data;
844
845 hci_dev_lock(hdev);
846 *val = hdev->le_adv_channel_map;
847 hci_dev_unlock(hdev);
848
849 return 0;
850 }
851
852 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
853 adv_channel_map_set, "%llu\n");
854
855 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
856 size_t count, loff_t *ppos)
857 {
858 struct hci_dev *hdev = file->private_data;
859 char buf[3];
860
861 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
862 buf[1] = '\n';
863 buf[2] = '\0';
864 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
865 }
866
867 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
868 size_t count, loff_t *position)
869 {
870 struct hci_dev *hdev = fp->private_data;
871 bool enable;
872 char buf[32];
873 size_t buf_size = min(count, (sizeof(buf)-1));
874
875 if (copy_from_user(buf, user_buffer, buf_size))
876 return -EFAULT;
877
878 buf[buf_size] = '\0';
879
880 if (strtobool(buf, &enable) < 0)
881 return -EINVAL;
882
883 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
884 return -EALREADY;
885
886 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
887
888 return count;
889 }
890
891 static const struct file_operations lowpan_debugfs_fops = {
892 .open = simple_open,
893 .read = lowpan_read,
894 .write = lowpan_write,
895 .llseek = default_llseek,
896 };
897
898 /* ---- HCI requests ---- */
899
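/* Synchronous requests are tracked through hdev->req_status: the caller sets
 * it to HCI_REQ_PEND, queues the commands and sleeps on hdev->req_wait_q;
 * hci_req_sync_complete() (or hci_req_cancel()) records the result, flips
 * the status to HCI_REQ_DONE (or HCI_REQ_CANCELED) and wakes the waiter.
 */
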
900 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
901 {
902 BT_DBG("%s result 0x%2.2x", hdev->name, result);
903
904 if (hdev->req_status == HCI_REQ_PEND) {
905 hdev->req_result = result;
906 hdev->req_status = HCI_REQ_DONE;
907 wake_up_interruptible(&hdev->req_wait_q);
908 }
909 }
910
911 static void hci_req_cancel(struct hci_dev *hdev, int err)
912 {
913 BT_DBG("%s err 0x%2.2x", hdev->name, err);
914
915 if (hdev->req_status == HCI_REQ_PEND) {
916 hdev->req_result = err;
917 hdev->req_status = HCI_REQ_CANCELED;
918 wake_up_interruptible(&hdev->req_wait_q);
919 }
920 }
921
922 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
923 u8 event)
924 {
925 struct hci_ev_cmd_complete *ev;
926 struct hci_event_hdr *hdr;
927 struct sk_buff *skb;
928
929 hci_dev_lock(hdev);
930
931 skb = hdev->recv_evt;
932 hdev->recv_evt = NULL;
933
934 hci_dev_unlock(hdev);
935
936 if (!skb)
937 return ERR_PTR(-ENODATA);
938
939 if (skb->len < sizeof(*hdr)) {
940 BT_ERR("Too short HCI event");
941 goto failed;
942 }
943
944 hdr = (void *) skb->data;
945 skb_pull(skb, HCI_EVENT_HDR_SIZE);
946
947 if (event) {
948 if (hdr->evt != event)
949 goto failed;
950 return skb;
951 }
952
953 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
954 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
955 goto failed;
956 }
957
958 if (skb->len < sizeof(*ev)) {
959 BT_ERR("Too short cmd_complete event");
960 goto failed;
961 }
962
963 ev = (void *) skb->data;
964 skb_pull(skb, sizeof(*ev));
965
966 if (opcode == __le16_to_cpu(ev->opcode))
967 return skb;
968
969 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
970 __le16_to_cpu(ev->opcode));
971
972 failed:
973 kfree_skb(skb);
974 return ERR_PTR(-ENODATA);
975 }
976
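/* Send a single HCI command synchronously and wait for its completion. On
 * success the skb carrying the Command Complete parameters (or the requested
 * event) is returned; the caller owns it and must kfree_skb() it, as the
 * debugfs handlers above do.
 */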
977 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
978 const void *param, u8 event, u32 timeout)
979 {
980 DECLARE_WAITQUEUE(wait, current);
981 struct hci_request req;
982 int err = 0;
983
984 BT_DBG("%s", hdev->name);
985
986 hci_req_init(&req, hdev);
987
988 hci_req_add_ev(&req, opcode, plen, param, event);
989
990 hdev->req_status = HCI_REQ_PEND;
991
992 err = hci_req_run(&req, hci_req_sync_complete);
993 if (err < 0)
994 return ERR_PTR(err);
995
996 add_wait_queue(&hdev->req_wait_q, &wait);
997 set_current_state(TASK_INTERRUPTIBLE);
998
999 schedule_timeout(timeout);
1000
1001 remove_wait_queue(&hdev->req_wait_q, &wait);
1002
1003 if (signal_pending(current))
1004 return ERR_PTR(-EINTR);
1005
1006 switch (hdev->req_status) {
1007 case HCI_REQ_DONE:
1008 err = -bt_to_errno(hdev->req_result);
1009 break;
1010
1011 case HCI_REQ_CANCELED:
1012 err = -hdev->req_result;
1013 break;
1014
1015 default:
1016 err = -ETIMEDOUT;
1017 break;
1018 }
1019
1020 hdev->req_status = hdev->req_result = 0;
1021
1022 BT_DBG("%s end: err %d", hdev->name, err);
1023
1024 if (err < 0)
1025 return ERR_PTR(err);
1026
1027 return hci_get_cmd_complete(hdev, opcode, event);
1028 }
1029 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1030
1031 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1032 const void *param, u32 timeout)
1033 {
1034 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1035 }
1036 EXPORT_SYMBOL(__hci_cmd_sync);
1037
1038 /* Execute request and wait for completion. */
1039 static int __hci_req_sync(struct hci_dev *hdev,
1040 void (*func)(struct hci_request *req,
1041 unsigned long opt),
1042 unsigned long opt, __u32 timeout)
1043 {
1044 struct hci_request req;
1045 DECLARE_WAITQUEUE(wait, current);
1046 int err = 0;
1047
1048 BT_DBG("%s start", hdev->name);
1049
1050 hci_req_init(&req, hdev);
1051
1052 hdev->req_status = HCI_REQ_PEND;
1053
1054 func(&req, opt);
1055
1056 err = hci_req_run(&req, hci_req_sync_complete);
1057 if (err < 0) {
1058 hdev->req_status = 0;
1059
1060 /* ENODATA means the HCI request command queue is empty.
1061 * This can happen when a request with conditionals doesn't
1062 * trigger any commands to be sent. This is normal behavior
1063 * and should not trigger an error return.
1064 */
1065 if (err == -ENODATA)
1066 return 0;
1067
1068 return err;
1069 }
1070
1071 add_wait_queue(&hdev->req_wait_q, &wait);
1072 set_current_state(TASK_INTERRUPTIBLE);
1073
1074 schedule_timeout(timeout);
1075
1076 remove_wait_queue(&hdev->req_wait_q, &wait);
1077
1078 if (signal_pending(current))
1079 return -EINTR;
1080
1081 switch (hdev->req_status) {
1082 case HCI_REQ_DONE:
1083 err = -bt_to_errno(hdev->req_result);
1084 break;
1085
1086 case HCI_REQ_CANCELED:
1087 err = -hdev->req_result;
1088 break;
1089
1090 default:
1091 err = -ETIMEDOUT;
1092 break;
1093 }
1094
1095 hdev->req_status = hdev->req_result = 0;
1096
1097 BT_DBG("%s end: err %d", hdev->name, err);
1098
1099 return err;
1100 }
1101
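/* Wrapper around __hci_req_sync() that fails early with -ENETDOWN if the
 * device is not up and serializes requests via the request lock.
 */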
1102 static int hci_req_sync(struct hci_dev *hdev,
1103 void (*req)(struct hci_request *req,
1104 unsigned long opt),
1105 unsigned long opt, __u32 timeout)
1106 {
1107 int ret;
1108
1109 if (!test_bit(HCI_UP, &hdev->flags))
1110 return -ENETDOWN;
1111
1112 /* Serialize all requests */
1113 hci_req_lock(hdev);
1114 ret = __hci_req_sync(hdev, req, opt, timeout);
1115 hci_req_unlock(hdev);
1116
1117 return ret;
1118 }
1119
1120 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1121 {
1122 BT_DBG("%s %ld", req->hdev->name, opt);
1123
1124 /* Reset device */
1125 set_bit(HCI_RESET, &req->hdev->flags);
1126 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1127 }
1128
1129 static void bredr_init(struct hci_request *req)
1130 {
1131 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1132
1133 /* Read Local Supported Features */
1134 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1135
1136 /* Read Local Version */
1137 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1138
1139 /* Read BD Address */
1140 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1141 }
1142
1143 static void amp_init(struct hci_request *req)
1144 {
1145 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1146
1147 /* Read Local Version */
1148 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1149
1150 /* Read Local Supported Commands */
1151 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1152
1153 /* Read Local Supported Features */
1154 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1155
1156 /* Read Local AMP Info */
1157 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1158
1159 /* Read Data Blk size */
1160 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1161
1162 /* Read Flow Control Mode */
1163 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1164
1165 /* Read Location Data */
1166 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1167 }
1168
1169 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1170 {
1171 struct hci_dev *hdev = req->hdev;
1172
1173 BT_DBG("%s %ld", hdev->name, opt);
1174
1175 /* Reset */
1176 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1177 hci_reset_req(req, 0);
1178
1179 switch (hdev->dev_type) {
1180 case HCI_BREDR:
1181 bredr_init(req);
1182 break;
1183
1184 case HCI_AMP:
1185 amp_init(req);
1186 break;
1187
1188 default:
1189 BT_ERR("Unknown device type %d", hdev->dev_type);
1190 break;
1191 }
1192 }
1193
1194 static void bredr_setup(struct hci_request *req)
1195 {
1196 struct hci_dev *hdev = req->hdev;
1197
1198 __le16 param;
1199 __u8 flt_type;
1200
1201 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1202 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1203
1204 /* Read Class of Device */
1205 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1206
1207 /* Read Local Name */
1208 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1209
1210 /* Read Voice Setting */
1211 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1212
1213 /* Read Number of Supported IAC */
1214 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1215
1216 /* Read Current IAC LAP */
1217 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1218
1219 /* Clear Event Filters */
1220 flt_type = HCI_FLT_CLEAR_ALL;
1221 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1222
1223 /* Connection accept timeout ~20 secs */
1224 param = __constant_cpu_to_le16(0x7d00);
1225 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1226
1227 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1228 * but it does not support page scan related HCI commands.
1229 */
1230 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1231 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1232 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1233 }
1234 }
1235
1236 static void le_setup(struct hci_request *req)
1237 {
1238 struct hci_dev *hdev = req->hdev;
1239
1240 /* Read LE Buffer Size */
1241 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1242
1243 /* Read LE Local Supported Features */
1244 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1245
1246 /* Read LE Advertising Channel TX Power */
1247 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1248
1249 /* Read LE White List Size */
1250 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1251
1252 /* Read LE Supported States */
1253 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1254
1255 /* LE-only controllers have LE implicitly enabled */
1256 if (!lmp_bredr_capable(hdev))
1257 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1258 }
1259
1260 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1261 {
1262 if (lmp_ext_inq_capable(hdev))
1263 return 0x02;
1264
1265 if (lmp_inq_rssi_capable(hdev))
1266 return 0x01;
1267
1268 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1269 hdev->lmp_subver == 0x0757)
1270 return 0x01;
1271
1272 if (hdev->manufacturer == 15) {
1273 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1274 return 0x01;
1275 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1276 return 0x01;
1277 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1278 return 0x01;
1279 }
1280
1281 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1282 hdev->lmp_subver == 0x1805)
1283 return 0x01;
1284
1285 return 0x00;
1286 }
1287
1288 static void hci_setup_inquiry_mode(struct hci_request *req)
1289 {
1290 u8 mode;
1291
1292 mode = hci_get_inquiry_mode(req->hdev);
1293
1294 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1295 }
1296
1297 static void hci_setup_event_mask(struct hci_request *req)
1298 {
1299 struct hci_dev *hdev = req->hdev;
1300
1301 /* The second byte is 0xff instead of 0x9f (two reserved bits
1302 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1303 * command otherwise.
1304 */
1305 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1306
1307         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
1308 * any event mask for pre 1.2 devices.
1309 */
1310 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1311 return;
1312
1313 if (lmp_bredr_capable(hdev)) {
1314 events[4] |= 0x01; /* Flow Specification Complete */
1315 events[4] |= 0x02; /* Inquiry Result with RSSI */
1316 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1317 events[5] |= 0x08; /* Synchronous Connection Complete */
1318 events[5] |= 0x10; /* Synchronous Connection Changed */
1319 } else {
1320 /* Use a different default for LE-only devices */
1321 memset(events, 0, sizeof(events));
1322 events[0] |= 0x10; /* Disconnection Complete */
1323 events[0] |= 0x80; /* Encryption Change */
1324 events[1] |= 0x08; /* Read Remote Version Information Complete */
1325 events[1] |= 0x20; /* Command Complete */
1326 events[1] |= 0x40; /* Command Status */
1327 events[1] |= 0x80; /* Hardware Error */
1328 events[2] |= 0x04; /* Number of Completed Packets */
1329 events[3] |= 0x02; /* Data Buffer Overflow */
1330 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1331 }
1332
1333 if (lmp_inq_rssi_capable(hdev))
1334 events[4] |= 0x02; /* Inquiry Result with RSSI */
1335
1336 if (lmp_sniffsubr_capable(hdev))
1337 events[5] |= 0x20; /* Sniff Subrating */
1338
1339 if (lmp_pause_enc_capable(hdev))
1340 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1341
1342 if (lmp_ext_inq_capable(hdev))
1343 events[5] |= 0x40; /* Extended Inquiry Result */
1344
1345 if (lmp_no_flush_capable(hdev))
1346 events[7] |= 0x01; /* Enhanced Flush Complete */
1347
1348 if (lmp_lsto_capable(hdev))
1349 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1350
1351 if (lmp_ssp_capable(hdev)) {
1352 events[6] |= 0x01; /* IO Capability Request */
1353 events[6] |= 0x02; /* IO Capability Response */
1354 events[6] |= 0x04; /* User Confirmation Request */
1355 events[6] |= 0x08; /* User Passkey Request */
1356 events[6] |= 0x10; /* Remote OOB Data Request */
1357 events[6] |= 0x20; /* Simple Pairing Complete */
1358 events[7] |= 0x04; /* User Passkey Notification */
1359 events[7] |= 0x08; /* Keypress Notification */
1360 events[7] |= 0x10; /* Remote Host Supported
1361 * Features Notification
1362 */
1363 }
1364
1365 if (lmp_le_capable(hdev))
1366 events[7] |= 0x20; /* LE Meta-Event */
1367
1368 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1369
1370 if (lmp_le_capable(hdev)) {
1371 memset(events, 0, sizeof(events));
1372 events[0] = 0x1f;
1373 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1374 sizeof(events), events);
1375 }
1376 }
1377
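/* Second stage of controller init: basic BR/EDR and LE setup plus the event
 * mask. This stage is only run for HCI_BREDR type controllers (see
 * __hci_init below).
 */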
1378 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1379 {
1380 struct hci_dev *hdev = req->hdev;
1381
1382 if (lmp_bredr_capable(hdev))
1383 bredr_setup(req);
1384 else
1385 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1386
1387 if (lmp_le_capable(hdev))
1388 le_setup(req);
1389
1390 hci_setup_event_mask(req);
1391
1392 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1393 * local supported commands HCI command.
1394 */
1395 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1396 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1397
1398 if (lmp_ssp_capable(hdev)) {
1399                 /* When SSP is available, the host features page should
1400                  * also be available. However, some controllers list
1401                  * max_page as 0 as long as SSP has not been enabled.
1402                  * To achieve proper debugging output, force max_page
1403                  * to a minimum of 1.
1404 */
1405 hdev->max_page = 0x01;
1406
1407 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1408 u8 mode = 0x01;
1409 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1410 sizeof(mode), &mode);
1411 } else {
1412 struct hci_cp_write_eir cp;
1413
1414 memset(hdev->eir, 0, sizeof(hdev->eir));
1415 memset(&cp, 0, sizeof(cp));
1416
1417 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1418 }
1419 }
1420
1421 if (lmp_inq_rssi_capable(hdev))
1422 hci_setup_inquiry_mode(req);
1423
1424 if (lmp_inq_tx_pwr_capable(hdev))
1425 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1426
1427 if (lmp_ext_feat_capable(hdev)) {
1428 struct hci_cp_read_local_ext_features cp;
1429
1430 cp.page = 0x01;
1431 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1432 sizeof(cp), &cp);
1433 }
1434
1435 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1436 u8 enable = 1;
1437 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1438 &enable);
1439 }
1440 }
1441
1442 static void hci_setup_link_policy(struct hci_request *req)
1443 {
1444 struct hci_dev *hdev = req->hdev;
1445 struct hci_cp_write_def_link_policy cp;
1446 u16 link_policy = 0;
1447
1448 if (lmp_rswitch_capable(hdev))
1449 link_policy |= HCI_LP_RSWITCH;
1450 if (lmp_hold_capable(hdev))
1451 link_policy |= HCI_LP_HOLD;
1452 if (lmp_sniff_capable(hdev))
1453 link_policy |= HCI_LP_SNIFF;
1454 if (lmp_park_capable(hdev))
1455 link_policy |= HCI_LP_PARK;
1456
1457 cp.policy = cpu_to_le16(link_policy);
1458 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1459 }
1460
1461 static void hci_set_le_support(struct hci_request *req)
1462 {
1463 struct hci_dev *hdev = req->hdev;
1464 struct hci_cp_write_le_host_supported cp;
1465
1466 /* LE-only devices do not support explicit enablement */
1467 if (!lmp_bredr_capable(hdev))
1468 return;
1469
1470 memset(&cp, 0, sizeof(cp));
1471
1472 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1473 cp.le = 0x01;
1474 cp.simul = lmp_le_br_capable(hdev);
1475 }
1476
1477 if (cp.le != lmp_host_le_capable(hdev))
1478 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1479 &cp);
1480 }
1481
1482 static void hci_set_event_mask_page_2(struct hci_request *req)
1483 {
1484 struct hci_dev *hdev = req->hdev;
1485 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1486
1487         /* If the Connectionless Slave Broadcast master role is supported,
1488 * enable all necessary events for it.
1489 */
1490 if (lmp_csb_master_capable(hdev)) {
1491 events[1] |= 0x40; /* Triggered Clock Capture */
1492 events[1] |= 0x80; /* Synchronization Train Complete */
1493 events[2] |= 0x10; /* Slave Page Response Timeout */
1494 events[2] |= 0x20; /* CSB Channel Map Change */
1495 }
1496
1497         /* If the Connectionless Slave Broadcast slave role is supported,
1498 * enable all necessary events for it.
1499 */
1500 if (lmp_csb_slave_capable(hdev)) {
1501 events[2] |= 0x01; /* Synchronization Train Received */
1502 events[2] |= 0x02; /* CSB Receive */
1503 events[2] |= 0x04; /* CSB Timeout */
1504 events[2] |= 0x08; /* Truncated Page Complete */
1505 }
1506
1507 /* Enable Authenticated Payload Timeout Expired event if supported */
1508 if (lmp_ping_capable(hdev))
1509 events[2] |= 0x80;
1510
1511 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1512 }
1513
1514 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1515 {
1516 struct hci_dev *hdev = req->hdev;
1517 u8 p;
1518
1519 /* Some Broadcom based Bluetooth controllers do not support the
1520 * Delete Stored Link Key command. They are clearly indicating its
1521 * absence in the bit mask of supported commands.
1522 *
1523          * Check the supported commands and only send the command if it
1524          * is marked as supported. If not supported, assume the controller
1525 * does not have actual support for stored link keys which makes this
1526 * command redundant anyway.
1527 *
1528          * Some controllers indicate that they support deleting
1529 * stored link keys, but they don't. The quirk lets a driver
1530 * just disable this command.
1531 */
1532 if (hdev->commands[6] & 0x80 &&
1533 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1534 struct hci_cp_delete_stored_link_key cp;
1535
1536 bacpy(&cp.bdaddr, BDADDR_ANY);
1537 cp.delete_all = 0x01;
1538 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1539 sizeof(cp), &cp);
1540 }
1541
1542 if (hdev->commands[5] & 0x10)
1543 hci_setup_link_policy(req);
1544
1545 if (lmp_le_capable(hdev))
1546 hci_set_le_support(req);
1547
1548 /* Read features beyond page 1 if available */
1549 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1550 struct hci_cp_read_local_ext_features cp;
1551
1552 cp.page = p;
1553 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1554 sizeof(cp), &cp);
1555 }
1556 }
1557
1558 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1559 {
1560 struct hci_dev *hdev = req->hdev;
1561
1562 /* Set event mask page 2 if the HCI command for it is supported */
1563 if (hdev->commands[22] & 0x04)
1564 hci_set_event_mask_page_2(req);
1565
1566 /* Check for Synchronization Train support */
1567 if (lmp_sync_train_capable(hdev))
1568 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1569
1570 /* Enable Secure Connections if supported and configured */
1571 if ((lmp_sc_capable(hdev) ||
1572 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1573 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1574 u8 support = 0x01;
1575 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1576 sizeof(support), &support);
1577 }
1578 }
1579
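/* Full controller initialization: stage 1 (reset and basic reads) runs for
 * every controller type, while stages 2-4 and the debugfs entries only apply
 * to HCI_BREDR controllers. The debugfs files are created once, during the
 * HCI_SETUP phase, and not on every power on.
 */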
1580 static int __hci_init(struct hci_dev *hdev)
1581 {
1582 int err;
1583
1584 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1585 if (err < 0)
1586 return err;
1587
1588 /* The Device Under Test (DUT) mode is special and available for
1589 * all controller types. So just create it early on.
1590 */
1591 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1592 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1593 &dut_mode_fops);
1594 }
1595
1596         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1597          * dual-mode BR/EDR/LE controllers. AMP controllers only need the
1598 * first stage init.
1599 */
1600 if (hdev->dev_type != HCI_BREDR)
1601 return 0;
1602
1603 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1604 if (err < 0)
1605 return err;
1606
1607 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1608 if (err < 0)
1609 return err;
1610
1611 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1612 if (err < 0)
1613 return err;
1614
1615 /* Only create debugfs entries during the initial setup
1616 * phase and not every time the controller gets powered on.
1617 */
1618 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1619 return 0;
1620
1621 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1622 &features_fops);
1623 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1624 &hdev->manufacturer);
1625 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1626 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1627 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1628 &blacklist_fops);
1629 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1630
1631 if (lmp_bredr_capable(hdev)) {
1632 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1633 hdev, &inquiry_cache_fops);
1634 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1635 hdev, &link_keys_fops);
1636 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1637 hdev, &dev_class_fops);
1638 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1639 hdev, &voice_setting_fops);
1640 }
1641
1642 if (lmp_ssp_capable(hdev)) {
1643 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1644 hdev, &auto_accept_delay_fops);
1645 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1646 hdev, &ssp_debug_mode_fops);
1647 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1648 hdev, &force_sc_support_fops);
1649 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1650 hdev, &sc_only_mode_fops);
1651 }
1652
1653 if (lmp_sniff_capable(hdev)) {
1654 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1655 hdev, &idle_timeout_fops);
1656 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1657 hdev, &sniff_min_interval_fops);
1658 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1659 hdev, &sniff_max_interval_fops);
1660 }
1661
1662 if (lmp_le_capable(hdev)) {
1663 debugfs_create_file("identity", 0400, hdev->debugfs,
1664 hdev, &identity_fops);
1665 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1666 hdev, &rpa_timeout_fops);
1667 debugfs_create_file("random_address", 0444, hdev->debugfs,
1668 hdev, &random_address_fops);
1669 debugfs_create_file("static_address", 0444, hdev->debugfs,
1670 hdev, &static_address_fops);
1671
1672 /* For controllers with a public address, provide a debug
1673 * option to force the usage of the configured static
1674 * address. By default the public address is used.
1675 */
1676 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1677 debugfs_create_file("force_static_address", 0644,
1678 hdev->debugfs, hdev,
1679 &force_static_address_fops);
1680
1681 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1682 &hdev->le_white_list_size);
1683 debugfs_create_file("identity_resolving_keys", 0400,
1684 hdev->debugfs, hdev,
1685 &identity_resolving_keys_fops);
1686 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1687 hdev, &long_term_keys_fops);
1688 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1689 hdev, &conn_min_interval_fops);
1690 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1691 hdev, &conn_max_interval_fops);
1692 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1693 hdev, &adv_channel_map_fops);
1694 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1695 &lowpan_debugfs_fops);
1696 }
1697
1698 return 0;
1699 }
1700
1701 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1702 {
1703 __u8 scan = opt;
1704
1705 BT_DBG("%s %x", req->hdev->name, scan);
1706
1707 /* Inquiry and Page scans */
1708 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1709 }
1710
1711 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1712 {
1713 __u8 auth = opt;
1714
1715 BT_DBG("%s %x", req->hdev->name, auth);
1716
1717 /* Authentication */
1718 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1719 }
1720
1721 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1722 {
1723 __u8 encrypt = opt;
1724
1725 BT_DBG("%s %x", req->hdev->name, encrypt);
1726
1727 /* Encryption */
1728 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1729 }
1730
1731 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1732 {
1733 __le16 policy = cpu_to_le16(opt);
1734
1735 BT_DBG("%s %x", req->hdev->name, policy);
1736
1737 /* Default link policy */
1738 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1739 }
1740
1741 /* Get HCI device by index.
1742 * Device is held on return. */
1743 struct hci_dev *hci_dev_get(int index)
1744 {
1745 struct hci_dev *hdev = NULL, *d;
1746
1747 BT_DBG("%d", index);
1748
1749 if (index < 0)
1750 return NULL;
1751
1752 read_lock(&hci_dev_list_lock);
1753 list_for_each_entry(d, &hci_dev_list, list) {
1754 if (d->id == index) {
1755 hdev = hci_dev_hold(d);
1756 break;
1757 }
1758 }
1759 read_unlock(&hci_dev_list_lock);
1760 return hdev;
1761 }
1762
1763 /* ---- Inquiry support ---- */
1764
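/* The inquiry cache keeps every discovered device on the "all" list and
 * additionally tracks devices on the "unknown" list (remote name not yet
 * known) or the "resolve" list (name resolution wanted or in progress).
 */
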
1765 bool hci_discovery_active(struct hci_dev *hdev)
1766 {
1767 struct discovery_state *discov = &hdev->discovery;
1768
1769 switch (discov->state) {
1770 case DISCOVERY_FINDING:
1771 case DISCOVERY_RESOLVING:
1772 return true;
1773
1774 default:
1775 return false;
1776 }
1777 }
1778
1779 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1780 {
1781 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1782
1783 if (hdev->discovery.state == state)
1784 return;
1785
1786 switch (state) {
1787 case DISCOVERY_STOPPED:
1788 if (hdev->discovery.state != DISCOVERY_STARTING)
1789 mgmt_discovering(hdev, 0);
1790 break;
1791 case DISCOVERY_STARTING:
1792 break;
1793 case DISCOVERY_FINDING:
1794 mgmt_discovering(hdev, 1);
1795 break;
1796 case DISCOVERY_RESOLVING:
1797 break;
1798 case DISCOVERY_STOPPING:
1799 break;
1800 }
1801
1802 hdev->discovery.state = state;
1803 }
1804
1805 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1806 {
1807 struct discovery_state *cache = &hdev->discovery;
1808 struct inquiry_entry *p, *n;
1809
1810 list_for_each_entry_safe(p, n, &cache->all, all) {
1811 list_del(&p->all);
1812 kfree(p);
1813 }
1814
1815 INIT_LIST_HEAD(&cache->unknown);
1816 INIT_LIST_HEAD(&cache->resolve);
1817 }
1818
1819 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1820 bdaddr_t *bdaddr)
1821 {
1822 struct discovery_state *cache = &hdev->discovery;
1823 struct inquiry_entry *e;
1824
1825 BT_DBG("cache %p, %pMR", cache, bdaddr);
1826
1827 list_for_each_entry(e, &cache->all, all) {
1828 if (!bacmp(&e->data.bdaddr, bdaddr))
1829 return e;
1830 }
1831
1832 return NULL;
1833 }
1834
1835 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1836 bdaddr_t *bdaddr)
1837 {
1838 struct discovery_state *cache = &hdev->discovery;
1839 struct inquiry_entry *e;
1840
1841 BT_DBG("cache %p, %pMR", cache, bdaddr);
1842
1843 list_for_each_entry(e, &cache->unknown, list) {
1844 if (!bacmp(&e->data.bdaddr, bdaddr))
1845 return e;
1846 }
1847
1848 return NULL;
1849 }
1850
1851 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1852 bdaddr_t *bdaddr,
1853 int state)
1854 {
1855 struct discovery_state *cache = &hdev->discovery;
1856 struct inquiry_entry *e;
1857
1858 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1859
1860 list_for_each_entry(e, &cache->resolve, list) {
1861 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1862 return e;
1863 if (!bacmp(&e->data.bdaddr, bdaddr))
1864 return e;
1865 }
1866
1867 return NULL;
1868 }
1869
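/* Re-insert an entry into the resolve list so the list stays ordered by
 * signal strength (smallest absolute RSSI first); entries already pending
 * name resolution are skipped over when choosing the insert position.
 */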
1870 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1871 struct inquiry_entry *ie)
1872 {
1873 struct discovery_state *cache = &hdev->discovery;
1874 struct list_head *pos = &cache->resolve;
1875 struct inquiry_entry *p;
1876
1877 list_del(&ie->list);
1878
1879 list_for_each_entry(p, &cache->resolve, list) {
1880 if (p->name_state != NAME_PENDING &&
1881 abs(p->data.rssi) >= abs(ie->data.rssi))
1882 break;
1883 pos = &p->list;
1884 }
1885
1886 list_add(&ie->list, pos);
1887 }
1888
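/* Add a discovered device to the inquiry cache or update the existing
 * entry. Returns false if the entry could not be allocated or its name is
 * still marked as not known, true otherwise.
 */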
1889 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1890 bool name_known, bool *ssp)
1891 {
1892 struct discovery_state *cache = &hdev->discovery;
1893 struct inquiry_entry *ie;
1894
1895 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1896
1897 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1898
1899 if (ssp)
1900 *ssp = data->ssp_mode;
1901
1902 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1903 if (ie) {
1904 if (ie->data.ssp_mode && ssp)
1905 *ssp = true;
1906
1907 if (ie->name_state == NAME_NEEDED &&
1908 data->rssi != ie->data.rssi) {
1909 ie->data.rssi = data->rssi;
1910 hci_inquiry_cache_update_resolve(hdev, ie);
1911 }
1912
1913 goto update;
1914 }
1915
1916 /* Entry not in the cache. Add new one. */
1917 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1918 if (!ie)
1919 return false;
1920
1921 list_add(&ie->all, &cache->all);
1922
1923 if (name_known) {
1924 ie->name_state = NAME_KNOWN;
1925 } else {
1926 ie->name_state = NAME_NOT_KNOWN;
1927 list_add(&ie->list, &cache->unknown);
1928 }
1929
1930 update:
1931 if (name_known && ie->name_state != NAME_KNOWN &&
1932 ie->name_state != NAME_PENDING) {
1933 ie->name_state = NAME_KNOWN;
1934 list_del(&ie->list);
1935 }
1936
1937 memcpy(&ie->data, data, sizeof(*data));
1938 ie->timestamp = jiffies;
1939 cache->timestamp = jiffies;
1940
1941 if (ie->name_state == NAME_NOT_KNOWN)
1942 return false;
1943
1944 return true;
1945 }
1946
1947 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1948 {
1949 struct discovery_state *cache = &hdev->discovery;
1950 struct inquiry_info *info = (struct inquiry_info *) buf;
1951 struct inquiry_entry *e;
1952 int copied = 0;
1953
1954 list_for_each_entry(e, &cache->all, all) {
1955 struct inquiry_data *data = &e->data;
1956
1957 if (copied >= num)
1958 break;
1959
1960 bacpy(&info->bdaddr, &data->bdaddr);
1961 info->pscan_rep_mode = data->pscan_rep_mode;
1962 info->pscan_period_mode = data->pscan_period_mode;
1963 info->pscan_mode = data->pscan_mode;
1964 memcpy(info->dev_class, data->dev_class, 3);
1965 info->clock_offset = data->clock_offset;
1966
1967 info++;
1968 copied++;
1969 }
1970
1971 BT_DBG("cache %p, copied %d", cache, copied);
1972 return copied;
1973 }
1974
1975 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1976 {
1977 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1978 struct hci_dev *hdev = req->hdev;
1979 struct hci_cp_inquiry cp;
1980
1981 BT_DBG("%s", hdev->name);
1982
1983 if (test_bit(HCI_INQUIRY, &hdev->flags))
1984 return;
1985
1986 /* Start Inquiry */
1987 memcpy(&cp.lap, &ir->lap, 3);
1988 cp.length = ir->length;
1989 cp.num_rsp = ir->num_rsp;
1990 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1991 }
1992
1993 static int wait_inquiry(void *word)
1994 {
1995 schedule();
1996 return signal_pending(current);
1997 }
1998
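/* Handler for the HCI inquiry ioctl: flush a stale cache, run the inquiry
 * request if needed, wait for it to finish and then copy the cached results
 * back to user space.
 */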
1999 int hci_inquiry(void __user *arg)
2000 {
2001 __u8 __user *ptr = arg;
2002 struct hci_inquiry_req ir;
2003 struct hci_dev *hdev;
2004 int err = 0, do_inquiry = 0, max_rsp;
2005 long timeo;
2006 __u8 *buf;
2007
2008 if (copy_from_user(&ir, ptr, sizeof(ir)))
2009 return -EFAULT;
2010
2011 hdev = hci_dev_get(ir.dev_id);
2012 if (!hdev)
2013 return -ENODEV;
2014
2015 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2016 err = -EBUSY;
2017 goto done;
2018 }
2019
2020 if (hdev->dev_type != HCI_BREDR) {
2021 err = -EOPNOTSUPP;
2022 goto done;
2023 }
2024
2025 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2026 err = -EOPNOTSUPP;
2027 goto done;
2028 }
2029
2030 hci_dev_lock(hdev);
2031 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2032 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2033 hci_inquiry_cache_flush(hdev);
2034 do_inquiry = 1;
2035 }
2036 hci_dev_unlock(hdev);
2037
2038 timeo = ir.length * msecs_to_jiffies(2000);
2039
2040 if (do_inquiry) {
2041 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2042 timeo);
2043 if (err < 0)
2044 goto done;
2045
2046 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2047 * cleared). If it is interrupted by a signal, return -EINTR.
2048 */
2049 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2050 TASK_INTERRUPTIBLE))
2051 return -EINTR;
2052 }
2053
2054         /* For an unlimited number of responses, use a buffer with
2055          * 255 entries.
2056 */
2057 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2058
2059         /* cache_dump can't sleep, so allocate a temporary buffer and then
2060          * copy it to user space.
2061 */
2062 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2063 if (!buf) {
2064 err = -ENOMEM;
2065 goto done;
2066 }
2067
2068 hci_dev_lock(hdev);
2069 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2070 hci_dev_unlock(hdev);
2071
2072 BT_DBG("num_rsp %d", ir.num_rsp);
2073
2074 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2075 ptr += sizeof(ir);
2076 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2077 ir.num_rsp))
2078 err = -EFAULT;
2079 } else
2080 err = -EFAULT;
2081
2082 kfree(buf);
2083
2084 done:
2085 hci_dev_put(hdev);
2086 return err;
2087 }
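/* Illustrative only: a minimal user-space sketch of exercising this path
 * through the HCIINQUIRY ioctl on a raw HCI socket. The buffer layout
 * (struct hci_inquiry_req followed by up to num_rsp inquiry_info entries)
 * mirrors the copy_to_user() calls above; error handling is omitted and
 * the device index is assumed to be 0 (hci0).
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	int num_rsp = 16;
 *	void *buf = malloc(sizeof(struct hci_inquiry_req) +
 *			   num_rsp * sizeof(struct inquiry_info));
 *	struct hci_inquiry_req *ir = buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;		(force a fresh inquiry)
 *	ir->lap[0]  = 0x33;			(GIAC 0x9e8b33, LSB first)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			(8 * 1.28 s)
 *	ir->num_rsp = num_rsp;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) >= 0) {
 *		struct inquiry_info *info = (void *)(ir + 1);
 *		... ir->num_rsp now holds the number of cached entries ...
 *	}
 */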
2088
2089 static int hci_dev_do_open(struct hci_dev *hdev)
2090 {
2091 int ret = 0;
2092
2093 BT_DBG("%s %p", hdev->name, hdev);
2094
2095 hci_req_lock(hdev);
2096
2097 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2098 ret = -ENODEV;
2099 goto done;
2100 }
2101
2102 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2103 /* Check for rfkill but allow the HCI setup stage to
2104 * proceed (which in itself doesn't cause any RF activity).
2105 */
2106 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2107 ret = -ERFKILL;
2108 goto done;
2109 }
2110
2111 /* Check for valid public address or a configured static
2112 * random address, but let the HCI setup proceed to
2113 * be able to determine if there is a public address
2114 * or not.
2115 *
2116 * In case of user channel usage, it is not important
2117 * if a public address or static random address is
2118 * available.
2119 *
2120 * This check is only valid for BR/EDR controllers
2121 * since AMP controllers do not have an address.
2122 */
2123 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2124 hdev->dev_type == HCI_BREDR &&
2125 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2126 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2127 ret = -EADDRNOTAVAIL;
2128 goto done;
2129 }
2130 }
2131
2132 if (test_bit(HCI_UP, &hdev->flags)) {
2133 ret = -EALREADY;
2134 goto done;
2135 }
2136
2137 if (hdev->open(hdev)) {
2138 ret = -EIO;
2139 goto done;
2140 }
2141
2142 atomic_set(&hdev->cmd_cnt, 1);
2143 set_bit(HCI_INIT, &hdev->flags);
2144
2145 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2146 ret = hdev->setup(hdev);
2147
2148 if (!ret) {
2149 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2150 set_bit(HCI_RAW, &hdev->flags);
2151
2152 if (!test_bit(HCI_RAW, &hdev->flags) &&
2153 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2154 ret = __hci_init(hdev);
2155 }
2156
2157 clear_bit(HCI_INIT, &hdev->flags);
2158
2159 if (!ret) {
2160 hci_dev_hold(hdev);
2161 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2162 set_bit(HCI_UP, &hdev->flags);
2163 hci_notify(hdev, HCI_DEV_UP);
2164 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2165 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2166 hdev->dev_type == HCI_BREDR) {
2167 hci_dev_lock(hdev);
2168 mgmt_powered(hdev, 1);
2169 hci_dev_unlock(hdev);
2170 }
2171 } else {
2172 /* Init failed, cleanup */
2173 flush_work(&hdev->tx_work);
2174 flush_work(&hdev->cmd_work);
2175 flush_work(&hdev->rx_work);
2176
2177 skb_queue_purge(&hdev->cmd_q);
2178 skb_queue_purge(&hdev->rx_q);
2179
2180 if (hdev->flush)
2181 hdev->flush(hdev);
2182
2183 if (hdev->sent_cmd) {
2184 kfree_skb(hdev->sent_cmd);
2185 hdev->sent_cmd = NULL;
2186 }
2187
2188 hdev->close(hdev);
2189 hdev->flags = 0;
2190 }
2191
2192 done:
2193 hci_req_unlock(hdev);
2194 return ret;
2195 }
2196
2197 /* ---- HCI ioctl helpers ---- */
2198
2199 int hci_dev_open(__u16 dev)
2200 {
2201 struct hci_dev *hdev;
2202 int err;
2203
2204 hdev = hci_dev_get(dev);
2205 if (!hdev)
2206 return -ENODEV;
2207
2208 /* We need to ensure that no other power on/off work is pending
2209 * before proceeding to call hci_dev_do_open. This is
2210 * particularly important if the setup procedure has not yet
2211 * completed.
2212 */
2213 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2214 cancel_delayed_work(&hdev->power_off);
2215
2216 /* After this call it is guaranteed that the setup procedure
2217 * has finished. This means that error conditions like RFKILL
2218 * or no valid public or static random address apply.
2219 */
2220 flush_workqueue(hdev->req_workqueue);
2221
2222 err = hci_dev_do_open(hdev);
2223
2224 hci_dev_put(hdev);
2225
2226 return err;
2227 }
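/* For reference, this helper backs the HCIDEVUP ioctl issued on a raw
 * HCI socket, so the user-space equivalent of powering on hci0 is
 * roughly (hedged sketch, error handling omitted):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIDEVUP, 0);
 */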
2228
2229 static int hci_dev_do_close(struct hci_dev *hdev)
2230 {
2231 BT_DBG("%s %p", hdev->name, hdev);
2232
2233 cancel_delayed_work(&hdev->power_off);
2234
2235 hci_req_cancel(hdev, ENODEV);
2236 hci_req_lock(hdev);
2237
2238 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2239 del_timer_sync(&hdev->cmd_timer);
2240 hci_req_unlock(hdev);
2241 return 0;
2242 }
2243
2244 /* Flush RX and TX works */
2245 flush_work(&hdev->tx_work);
2246 flush_work(&hdev->rx_work);
2247
2248 if (hdev->discov_timeout > 0) {
2249 cancel_delayed_work(&hdev->discov_off);
2250 hdev->discov_timeout = 0;
2251 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2252 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2253 }
2254
2255 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2256 cancel_delayed_work(&hdev->service_cache);
2257
2258 cancel_delayed_work_sync(&hdev->le_scan_disable);
2259 cancel_delayed_work_sync(&hdev->rpa_expired);
2260
2261 hci_dev_lock(hdev);
2262 hci_inquiry_cache_flush(hdev);
2263 hci_conn_hash_flush(hdev);
2264 hci_dev_unlock(hdev);
2265
2266 hci_notify(hdev, HCI_DEV_DOWN);
2267
2268 if (hdev->flush)
2269 hdev->flush(hdev);
2270
2271 /* Reset device */
2272 skb_queue_purge(&hdev->cmd_q);
2273 atomic_set(&hdev->cmd_cnt, 1);
2274 if (!test_bit(HCI_RAW, &hdev->flags) &&
2275 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2276 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2277 set_bit(HCI_INIT, &hdev->flags);
2278 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2279 clear_bit(HCI_INIT, &hdev->flags);
2280 }
2281
2282 /* flush cmd work */
2283 flush_work(&hdev->cmd_work);
2284
2285 /* Drop queues */
2286 skb_queue_purge(&hdev->rx_q);
2287 skb_queue_purge(&hdev->cmd_q);
2288 skb_queue_purge(&hdev->raw_q);
2289
2290 /* Drop last sent command */
2291 if (hdev->sent_cmd) {
2292 del_timer_sync(&hdev->cmd_timer);
2293 kfree_skb(hdev->sent_cmd);
2294 hdev->sent_cmd = NULL;
2295 }
2296
2297 kfree_skb(hdev->recv_evt);
2298 hdev->recv_evt = NULL;
2299
2300 /* After this point our queues are empty
2301 * and no tasks are scheduled. */
2302 hdev->close(hdev);
2303
2304 /* Clear flags */
2305 hdev->flags = 0;
2306 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2307
2308 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2309 if (hdev->dev_type == HCI_BREDR) {
2310 hci_dev_lock(hdev);
2311 mgmt_powered(hdev, 0);
2312 hci_dev_unlock(hdev);
2313 }
2314 }
2315
2316 /* Controller radio is available but is currently powered down */
2317 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2318
2319 memset(hdev->eir, 0, sizeof(hdev->eir));
2320 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2321 bacpy(&hdev->random_addr, BDADDR_ANY);
2322
2323 hci_req_unlock(hdev);
2324
2325 hci_dev_put(hdev);
2326 return 0;
2327 }
2328
2329 int hci_dev_close(__u16 dev)
2330 {
2331 struct hci_dev *hdev;
2332 int err;
2333
2334 hdev = hci_dev_get(dev);
2335 if (!hdev)
2336 return -ENODEV;
2337
2338 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2339 err = -EBUSY;
2340 goto done;
2341 }
2342
2343 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2344 cancel_delayed_work(&hdev->power_off);
2345
2346 err = hci_dev_do_close(hdev);
2347
2348 done:
2349 hci_dev_put(hdev);
2350 return err;
2351 }
2352
2353 int hci_dev_reset(__u16 dev)
2354 {
2355 struct hci_dev *hdev;
2356 int ret = 0;
2357
2358 hdev = hci_dev_get(dev);
2359 if (!hdev)
2360 return -ENODEV;
2361
2362 hci_req_lock(hdev);
2363
2364 if (!test_bit(HCI_UP, &hdev->flags)) {
2365 ret = -ENETDOWN;
2366 goto done;
2367 }
2368
2369 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2370 ret = -EBUSY;
2371 goto done;
2372 }
2373
2374 /* Drop queues */
2375 skb_queue_purge(&hdev->rx_q);
2376 skb_queue_purge(&hdev->cmd_q);
2377
2378 hci_dev_lock(hdev);
2379 hci_inquiry_cache_flush(hdev);
2380 hci_conn_hash_flush(hdev);
2381 hci_dev_unlock(hdev);
2382
2383 if (hdev->flush)
2384 hdev->flush(hdev);
2385
2386 atomic_set(&hdev->cmd_cnt, 1);
2387 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2388
2389 if (!test_bit(HCI_RAW, &hdev->flags))
2390 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2391
2392 done:
2393 hci_req_unlock(hdev);
2394 hci_dev_put(hdev);
2395 return ret;
2396 }
2397
2398 int hci_dev_reset_stat(__u16 dev)
2399 {
2400 struct hci_dev *hdev;
2401 int ret = 0;
2402
2403 hdev = hci_dev_get(dev);
2404 if (!hdev)
2405 return -ENODEV;
2406
2407 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2408 ret = -EBUSY;
2409 goto done;
2410 }
2411
2412 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2413
2414 done:
2415 hci_dev_put(hdev);
2416 return ret;
2417 }
2418
2419 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2420 {
2421 struct hci_dev *hdev;
2422 struct hci_dev_req dr;
2423 int err = 0;
2424
2425 if (copy_from_user(&dr, arg, sizeof(dr)))
2426 return -EFAULT;
2427
2428 hdev = hci_dev_get(dr.dev_id);
2429 if (!hdev)
2430 return -ENODEV;
2431
2432 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2433 err = -EBUSY;
2434 goto done;
2435 }
2436
2437 if (hdev->dev_type != HCI_BREDR) {
2438 err = -EOPNOTSUPP;
2439 goto done;
2440 }
2441
2442 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2443 err = -EOPNOTSUPP;
2444 goto done;
2445 }
2446
2447 switch (cmd) {
2448 case HCISETAUTH:
2449 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2450 HCI_INIT_TIMEOUT);
2451 break;
2452
2453 case HCISETENCRYPT:
2454 if (!lmp_encrypt_capable(hdev)) {
2455 err = -EOPNOTSUPP;
2456 break;
2457 }
2458
2459 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2460 /* Auth must be enabled first */
2461 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2462 HCI_INIT_TIMEOUT);
2463 if (err)
2464 break;
2465 }
2466
2467 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2468 HCI_INIT_TIMEOUT);
2469 break;
2470
2471 case HCISETSCAN:
2472 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2473 HCI_INIT_TIMEOUT);
2474 break;
2475
2476 case HCISETLINKPOL:
2477 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2478 HCI_INIT_TIMEOUT);
2479 break;
2480
2481 case HCISETLINKMODE:
2482 hdev->link_mode = ((__u16) dr.dev_opt) &
2483 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2484 break;
2485
2486 case HCISETPTYPE:
2487 hdev->pkt_type = (__u16) dr.dev_opt;
2488 break;
2489
2490 case HCISETACLMTU:
2491 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2492 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2493 break;
2494
2495 case HCISETSCOMTU:
2496 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2497 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2498 break;
2499
2500 default:
2501 err = -EINVAL;
2502 break;
2503 }
2504
2505 done:
2506 hci_dev_put(hdev);
2507 return err;
2508 }
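/* Note on HCISETACLMTU/HCISETSCOMTU above: dev_opt is read back as two
 * consecutive 16-bit words, the first being the packet count and the
 * second the MTU. A hedged user-space sketch for a little-endian host
 * (the numeric values are arbitrary examples):
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (310 << 16) | 10;		(acl_mtu = 310, acl_pkts = 10)
 *	ioctl(dd, HCISETACLMTU, (unsigned long) &dr);
 */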
2509
2510 int hci_get_dev_list(void __user *arg)
2511 {
2512 struct hci_dev *hdev;
2513 struct hci_dev_list_req *dl;
2514 struct hci_dev_req *dr;
2515 int n = 0, size, err;
2516 __u16 dev_num;
2517
2518 if (get_user(dev_num, (__u16 __user *) arg))
2519 return -EFAULT;
2520
2521 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2522 return -EINVAL;
2523
2524 size = sizeof(*dl) + dev_num * sizeof(*dr);
2525
2526 dl = kzalloc(size, GFP_KERNEL);
2527 if (!dl)
2528 return -ENOMEM;
2529
2530 dr = dl->dev_req;
2531
2532 read_lock(&hci_dev_list_lock);
2533 list_for_each_entry(hdev, &hci_dev_list, list) {
2534 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2535 cancel_delayed_work(&hdev->power_off);
2536
2537 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2538 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2539
2540 (dr + n)->dev_id = hdev->id;
2541 (dr + n)->dev_opt = hdev->flags;
2542
2543 if (++n >= dev_num)
2544 break;
2545 }
2546 read_unlock(&hci_dev_list_lock);
2547
2548 dl->dev_num = n;
2549 size = sizeof(*dl) + n * sizeof(*dr);
2550
2551 err = copy_to_user(arg, dl, size);
2552 kfree(dl);
2553
2554 return err ? -EFAULT : 0;
2555 }
2556
2557 int hci_get_dev_info(void __user *arg)
2558 {
2559 struct hci_dev *hdev;
2560 struct hci_dev_info di;
2561 int err = 0;
2562
2563 if (copy_from_user(&di, arg, sizeof(di)))
2564 return -EFAULT;
2565
2566 hdev = hci_dev_get(di.dev_id);
2567 if (!hdev)
2568 return -ENODEV;
2569
2570 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2571 cancel_delayed_work_sync(&hdev->power_off);
2572
2573 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2574 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2575
2576 strcpy(di.name, hdev->name);
2577 di.bdaddr = hdev->bdaddr;
2578 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2579 di.flags = hdev->flags;
2580 di.pkt_type = hdev->pkt_type;
2581 if (lmp_bredr_capable(hdev)) {
2582 di.acl_mtu = hdev->acl_mtu;
2583 di.acl_pkts = hdev->acl_pkts;
2584 di.sco_mtu = hdev->sco_mtu;
2585 di.sco_pkts = hdev->sco_pkts;
2586 } else {
2587 di.acl_mtu = hdev->le_mtu;
2588 di.acl_pkts = hdev->le_pkts;
2589 di.sco_mtu = 0;
2590 di.sco_pkts = 0;
2591 }
2592 di.link_policy = hdev->link_policy;
2593 di.link_mode = hdev->link_mode;
2594
2595 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2596 memcpy(&di.features, &hdev->features, sizeof(di.features));
2597
2598 if (copy_to_user(arg, &di, sizeof(di)))
2599 err = -EFAULT;
2600
2601 hci_dev_put(hdev);
2602
2603 return err;
2604 }
2605
2606 /* ---- Interface to HCI drivers ---- */
2607
2608 static int hci_rfkill_set_block(void *data, bool blocked)
2609 {
2610 struct hci_dev *hdev = data;
2611
2612 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2613
2614 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2615 return -EBUSY;
2616
2617 if (blocked) {
2618 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2619 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2620 hci_dev_do_close(hdev);
2621 } else {
2622 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2623 }
2624
2625 return 0;
2626 }
2627
2628 static const struct rfkill_ops hci_rfkill_ops = {
2629 .set_block = hci_rfkill_set_block,
2630 };
2631
2632 static void hci_power_on(struct work_struct *work)
2633 {
2634 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2635 int err;
2636
2637 BT_DBG("%s", hdev->name);
2638
2639 err = hci_dev_do_open(hdev);
2640 if (err < 0) {
2641 mgmt_set_powered_failed(hdev, err);
2642 return;
2643 }
2644
2645 /* During the HCI setup phase, a few error conditions are
2646 * ignored and they need to be checked now. If they are still
2647 * valid, it is important to turn the device back off.
2648 */
2649 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2650 (hdev->dev_type == HCI_BREDR &&
2651 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2652 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2653 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2654 hci_dev_do_close(hdev);
2655 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2656 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2657 HCI_AUTO_OFF_TIMEOUT);
2658 }
2659
2660 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2661 mgmt_index_added(hdev);
2662 }
2663
2664 static void hci_power_off(struct work_struct *work)
2665 {
2666 struct hci_dev *hdev = container_of(work, struct hci_dev,
2667 power_off.work);
2668
2669 BT_DBG("%s", hdev->name);
2670
2671 hci_dev_do_close(hdev);
2672 }
2673
2674 static void hci_discov_off(struct work_struct *work)
2675 {
2676 struct hci_dev *hdev;
2677
2678 hdev = container_of(work, struct hci_dev, discov_off.work);
2679
2680 BT_DBG("%s", hdev->name);
2681
2682 mgmt_discoverable_timeout(hdev);
2683 }
2684
2685 void hci_uuids_clear(struct hci_dev *hdev)
2686 {
2687 struct bt_uuid *uuid, *tmp;
2688
2689 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2690 list_del(&uuid->list);
2691 kfree(uuid);
2692 }
2693 }
2694
2695 void hci_link_keys_clear(struct hci_dev *hdev)
2696 {
2697 struct list_head *p, *n;
2698
2699 list_for_each_safe(p, n, &hdev->link_keys) {
2700 struct link_key *key;
2701
2702 key = list_entry(p, struct link_key, list);
2703
2704 list_del(p);
2705 kfree(key);
2706 }
2707 }
2708
2709 void hci_smp_ltks_clear(struct hci_dev *hdev)
2710 {
2711 struct smp_ltk *k, *tmp;
2712
2713 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2714 list_del(&k->list);
2715 kfree(k);
2716 }
2717 }
2718
2719 void hci_smp_irks_clear(struct hci_dev *hdev)
2720 {
2721 struct smp_irk *k, *tmp;
2722
2723 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2724 list_del(&k->list);
2725 kfree(k);
2726 }
2727 }
2728
2729 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2730 {
2731 struct link_key *k;
2732
2733 list_for_each_entry(k, &hdev->link_keys, list)
2734 if (bacmp(bdaddr, &k->bdaddr) == 0)
2735 return k;
2736
2737 return NULL;
2738 }
2739
2740 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2741 u8 key_type, u8 old_key_type)
2742 {
2743 /* Legacy key */
2744 if (key_type < 0x03)
2745 return true;
2746
2747 /* Debug keys are insecure so don't store them persistently */
2748 if (key_type == HCI_LK_DEBUG_COMBINATION)
2749 return false;
2750
2751 /* Changed combination key and there's no previous one */
2752 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2753 return false;
2754
2755 /* Security mode 3 case */
2756 if (!conn)
2757 return true;
2758
2759 /* Neither the local nor the remote side requested no-bonding */
2760 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2761 return true;
2762
2763 /* Local side had dedicated bonding as requirement */
2764 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2765 return true;
2766
2767 /* Remote side had dedicated bonding as requirement */
2768 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2769 return true;
2770
2771 /* If none of the above criteria match, then don't store the key
2772 * persistently */
2773 return false;
2774 }
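/* Worked example of the rules above (key and auth_type values as defined
 * by the HCI specification): an unauthenticated combination key (0x04)
 * created on a link where the local side requested dedicated bonding
 * (auth_type 0x02) is stored persistently, while the same key type on a
 * link where both sides used the no-bonding requirements (0x00/0x01) is
 * kept only for the current connection.
 */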
2775
2776 static bool ltk_type_master(u8 type)
2777 {
2778 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2779 return true;
2780
2781 return false;
2782 }
2783
2784 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2785 bool master)
2786 {
2787 struct smp_ltk *k;
2788
2789 list_for_each_entry(k, &hdev->long_term_keys, list) {
2790 if (k->ediv != ediv ||
2791 memcmp(rand, k->rand, sizeof(k->rand)))
2792 continue;
2793
2794 if (ltk_type_master(k->type) != master)
2795 continue;
2796
2797 return k;
2798 }
2799
2800 return NULL;
2801 }
2802
2803 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2804 u8 addr_type, bool master)
2805 {
2806 struct smp_ltk *k;
2807
2808 list_for_each_entry(k, &hdev->long_term_keys, list)
2809 if (addr_type == k->bdaddr_type &&
2810 bacmp(bdaddr, &k->bdaddr) == 0 &&
2811 ltk_type_master(k->type) == master)
2812 return k;
2813
2814 return NULL;
2815 }
2816
2817 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2818 {
2819 struct smp_irk *irk;
2820
2821 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2822 if (!bacmp(&irk->rpa, rpa))
2823 return irk;
2824 }
2825
2826 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2827 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2828 bacpy(&irk->rpa, rpa);
2829 return irk;
2830 }
2831 }
2832
2833 return NULL;
2834 }
2835
2836 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2837 u8 addr_type)
2838 {
2839 struct smp_irk *irk;
2840
2841 /* Identity Address must be public or static random */
2842 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2843 return NULL;
2844
2845 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2846 if (addr_type == irk->addr_type &&
2847 bacmp(bdaddr, &irk->bdaddr) == 0)
2848 return irk;
2849 }
2850
2851 return NULL;
2852 }
2853
2854 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2855 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2856 {
2857 struct link_key *key, *old_key;
2858 u8 old_key_type;
2859 bool persistent;
2860
2861 old_key = hci_find_link_key(hdev, bdaddr);
2862 if (old_key) {
2863 old_key_type = old_key->type;
2864 key = old_key;
2865 } else {
2866 old_key_type = conn ? conn->key_type : 0xff;
2867 key = kzalloc(sizeof(*key), GFP_KERNEL);
2868 if (!key)
2869 return -ENOMEM;
2870 list_add(&key->list, &hdev->link_keys);
2871 }
2872
2873 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2874
2875 /* Some buggy controller combinations generate a changed
2876 * combination key for legacy pairing even when there's no
2877 * previous key */
2878 if (type == HCI_LK_CHANGED_COMBINATION &&
2879 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2880 type = HCI_LK_COMBINATION;
2881 if (conn)
2882 conn->key_type = type;
2883 }
2884
2885 bacpy(&key->bdaddr, bdaddr);
2886 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2887 key->pin_len = pin_len;
2888
2889 if (type == HCI_LK_CHANGED_COMBINATION)
2890 key->type = old_key_type;
2891 else
2892 key->type = type;
2893
2894 if (!new_key)
2895 return 0;
2896
2897 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2898
2899 mgmt_new_link_key(hdev, key, persistent);
2900
2901 if (conn)
2902 conn->flush_key = !persistent;
2903
2904 return 0;
2905 }
2906
2907 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2908 u8 addr_type, u8 type, u8 authenticated,
2909 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
2910 {
2911 struct smp_ltk *key, *old_key;
2912 bool master = ltk_type_master(type);
2913
2914 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
2915 if (old_key)
2916 key = old_key;
2917 else {
2918 key = kzalloc(sizeof(*key), GFP_KERNEL);
2919 if (!key)
2920 return NULL;
2921 list_add(&key->list, &hdev->long_term_keys);
2922 }
2923
2924 bacpy(&key->bdaddr, bdaddr);
2925 key->bdaddr_type = addr_type;
2926 memcpy(key->val, tk, sizeof(key->val));
2927 key->authenticated = authenticated;
2928 key->ediv = ediv;
2929 key->enc_size = enc_size;
2930 key->type = type;
2931 memcpy(key->rand, rand, sizeof(key->rand));
2932
2933 return key;
2934 }
2935
2936 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2937 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2938 {
2939 struct smp_irk *irk;
2940
2941 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2942 if (!irk) {
2943 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2944 if (!irk)
2945 return NULL;
2946
2947 bacpy(&irk->bdaddr, bdaddr);
2948 irk->addr_type = addr_type;
2949
2950 list_add(&irk->list, &hdev->identity_resolving_keys);
2951 }
2952
2953 memcpy(irk->val, val, 16);
2954 bacpy(&irk->rpa, rpa);
2955
2956 return irk;
2957 }
2958
2959 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2960 {
2961 struct link_key *key;
2962
2963 key = hci_find_link_key(hdev, bdaddr);
2964 if (!key)
2965 return -ENOENT;
2966
2967 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2968
2969 list_del(&key->list);
2970 kfree(key);
2971
2972 return 0;
2973 }
2974
2975 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2976 {
2977 struct smp_ltk *k, *tmp;
2978 int removed = 0;
2979
2980 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2981 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2982 continue;
2983
2984 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2985
2986 list_del(&k->list);
2987 kfree(k);
2988 removed++;
2989 }
2990
2991 return removed ? 0 : -ENOENT;
2992 }
2993
2994 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2995 {
2996 struct smp_irk *k, *tmp;
2997
2998 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2999 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3000 continue;
3001
3002 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3003
3004 list_del(&k->list);
3005 kfree(k);
3006 }
3007 }
3008
3009 /* HCI command timer function */
3010 static void hci_cmd_timeout(unsigned long arg)
3011 {
3012 struct hci_dev *hdev = (void *) arg;
3013
3014 if (hdev->sent_cmd) {
3015 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3016 u16 opcode = __le16_to_cpu(sent->opcode);
3017
3018 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3019 } else {
3020 BT_ERR("%s command tx timeout", hdev->name);
3021 }
3022
3023 atomic_set(&hdev->cmd_cnt, 1);
3024 queue_work(hdev->workqueue, &hdev->cmd_work);
3025 }
3026
3027 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3028 bdaddr_t *bdaddr)
3029 {
3030 struct oob_data *data;
3031
3032 list_for_each_entry(data, &hdev->remote_oob_data, list)
3033 if (bacmp(bdaddr, &data->bdaddr) == 0)
3034 return data;
3035
3036 return NULL;
3037 }
3038
3039 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3040 {
3041 struct oob_data *data;
3042
3043 data = hci_find_remote_oob_data(hdev, bdaddr);
3044 if (!data)
3045 return -ENOENT;
3046
3047 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3048
3049 list_del(&data->list);
3050 kfree(data);
3051
3052 return 0;
3053 }
3054
3055 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3056 {
3057 struct oob_data *data, *n;
3058
3059 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3060 list_del(&data->list);
3061 kfree(data);
3062 }
3063 }
3064
3065 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3066 u8 *hash, u8 *randomizer)
3067 {
3068 struct oob_data *data;
3069
3070 data = hci_find_remote_oob_data(hdev, bdaddr);
3071 if (!data) {
3072 data = kmalloc(sizeof(*data), GFP_KERNEL);
3073 if (!data)
3074 return -ENOMEM;
3075
3076 bacpy(&data->bdaddr, bdaddr);
3077 list_add(&data->list, &hdev->remote_oob_data);
3078 }
3079
3080 memcpy(data->hash192, hash, sizeof(data->hash192));
3081 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3082
3083 memset(data->hash256, 0, sizeof(data->hash256));
3084 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3085
3086 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3087
3088 return 0;
3089 }
3090
3091 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3092 u8 *hash192, u8 *randomizer192,
3093 u8 *hash256, u8 *randomizer256)
3094 {
3095 struct oob_data *data;
3096
3097 data = hci_find_remote_oob_data(hdev, bdaddr);
3098 if (!data) {
3099 data = kmalloc(sizeof(*data), GFP_KERNEL);
3100 if (!data)
3101 return -ENOMEM;
3102
3103 bacpy(&data->bdaddr, bdaddr);
3104 list_add(&data->list, &hdev->remote_oob_data);
3105 }
3106
3107 memcpy(data->hash192, hash192, sizeof(data->hash192));
3108 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3109
3110 memcpy(data->hash256, hash256, sizeof(data->hash256));
3111 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3112
3113 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3114
3115 return 0;
3116 }
3117
3118 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3119 bdaddr_t *bdaddr, u8 type)
3120 {
3121 struct bdaddr_list *b;
3122
3123 list_for_each_entry(b, &hdev->blacklist, list) {
3124 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3125 return b;
3126 }
3127
3128 return NULL;
3129 }
3130
3131 void hci_blacklist_clear(struct hci_dev *hdev)
3132 {
3133 struct list_head *p, *n;
3134
3135 list_for_each_safe(p, n, &hdev->blacklist) {
3136 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3137
3138 list_del(p);
3139 kfree(b);
3140 }
3141 }
3142
3143 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3144 {
3145 struct bdaddr_list *entry;
3146
3147 if (!bacmp(bdaddr, BDADDR_ANY))
3148 return -EBADF;
3149
3150 if (hci_blacklist_lookup(hdev, bdaddr, type))
3151 return -EEXIST;
3152
3153 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3154 if (!entry)
3155 return -ENOMEM;
3156
3157 bacpy(&entry->bdaddr, bdaddr);
3158 entry->bdaddr_type = type;
3159
3160 list_add(&entry->list, &hdev->blacklist);
3161
3162 return mgmt_device_blocked(hdev, bdaddr, type);
3163 }
3164
3165 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3166 {
3167 struct bdaddr_list *entry;
3168
3169 if (!bacmp(bdaddr, BDADDR_ANY)) {
3170 hci_blacklist_clear(hdev);
3171 return 0;
3172 }
3173
3174 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3175 if (!entry)
3176 return -ENOENT;
3177
3178 list_del(&entry->list);
3179 kfree(entry);
3180
3181 return mgmt_device_unblocked(hdev, bdaddr, type);
3182 }
3183
3184 /* This function requires the caller holds hdev->lock */
3185 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3186 bdaddr_t *addr, u8 addr_type)
3187 {
3188 struct hci_conn_params *params;
3189
3190 list_for_each_entry(params, &hdev->le_conn_params, list) {
3191 if (bacmp(&params->addr, addr) == 0 &&
3192 params->addr_type == addr_type) {
3193 return params;
3194 }
3195 }
3196
3197 return NULL;
3198 }
3199
3200 /* This function requires the caller holds hdev->lock */
3201 void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3202 u16 conn_min_interval, u16 conn_max_interval)
3203 {
3204 struct hci_conn_params *params;
3205
3206 params = hci_conn_params_lookup(hdev, addr, addr_type);
3207 if (params) {
3208 params->conn_min_interval = conn_min_interval;
3209 params->conn_max_interval = conn_max_interval;
3210 return;
3211 }
3212
3213 params = kzalloc(sizeof(*params), GFP_KERNEL);
3214 if (!params) {
3215 BT_ERR("Out of memory");
3216 return;
3217 }
3218
3219 bacpy(&params->addr, addr);
3220 params->addr_type = addr_type;
3221 params->conn_min_interval = conn_min_interval;
3222 params->conn_max_interval = conn_max_interval;
3223
3224 list_add(&params->list, &hdev->le_conn_params);
3225
3226 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3227 "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
3228 conn_max_interval);
3229 }
3230
3231 /* This function requires the caller holds hdev->lock */
3232 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3233 {
3234 struct hci_conn_params *params;
3235
3236 params = hci_conn_params_lookup(hdev, addr, addr_type);
3237 if (!params)
3238 return;
3239
3240 list_del(&params->list);
3241 kfree(params);
3242
3243 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3244 }
3245
3246 /* This function requires the caller holds hdev->lock */
3247 void hci_conn_params_clear(struct hci_dev *hdev)
3248 {
3249 struct hci_conn_params *params, *tmp;
3250
3251 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3252 list_del(&params->list);
3253 kfree(params);
3254 }
3255
3256 BT_DBG("All LE connection parameters were removed");
3257 }
3258
3259 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3260 {
3261 if (status) {
3262 BT_ERR("Failed to start inquiry: status %d", status);
3263
3264 hci_dev_lock(hdev);
3265 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3266 hci_dev_unlock(hdev);
3267 return;
3268 }
3269 }
3270
3271 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3272 {
3273 /* General inquiry access code (GIAC) */
3274 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3275 struct hci_request req;
3276 struct hci_cp_inquiry cp;
3277 int err;
3278
3279 if (status) {
3280 BT_ERR("Failed to disable LE scanning: status %d", status);
3281 return;
3282 }
3283
3284 switch (hdev->discovery.type) {
3285 case DISCOV_TYPE_LE:
3286 hci_dev_lock(hdev);
3287 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3288 hci_dev_unlock(hdev);
3289 break;
3290
3291 case DISCOV_TYPE_INTERLEAVED:
3292 hci_req_init(&req, hdev);
3293
3294 memset(&cp, 0, sizeof(cp));
3295 memcpy(&cp.lap, lap, sizeof(cp.lap));
3296 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3297 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3298
3299 hci_dev_lock(hdev);
3300
3301 hci_inquiry_cache_flush(hdev);
3302
3303 err = hci_req_run(&req, inquiry_complete);
3304 if (err) {
3305 BT_ERR("Inquiry request failed: err %d", err);
3306 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3307 }
3308
3309 hci_dev_unlock(hdev);
3310 break;
3311 }
3312 }
3313
3314 static void le_scan_disable_work(struct work_struct *work)
3315 {
3316 struct hci_dev *hdev = container_of(work, struct hci_dev,
3317 le_scan_disable.work);
3318 struct hci_cp_le_set_scan_enable cp;
3319 struct hci_request req;
3320 int err;
3321
3322 BT_DBG("%s", hdev->name);
3323
3324 hci_req_init(&req, hdev);
3325
3326 memset(&cp, 0, sizeof(cp));
3327 cp.enable = LE_SCAN_DISABLE;
3328 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3329
3330 err = hci_req_run(&req, le_scan_disable_work_complete);
3331 if (err)
3332 BT_ERR("Disable LE scanning request failed: err %d", err);
3333 }
3334
3335 int hci_update_random_address(struct hci_request *req, u8 *own_addr_type)
3336 {
3337 struct hci_dev *hdev = req->hdev;
3338 int err;
3339
3340 /* If privacy is enabled, use a resolvable private address. If
3341 * the current RPA has expired, or something other than an RPA is
3342 * currently in use, generate a new one.
3343 */
3344 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3345 bdaddr_t rpa;
3346 int to;
3347
3348 *own_addr_type = ADDR_LE_DEV_RANDOM;
3349
3350 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3351 hci_bdaddr_is_rpa(&hdev->random_addr, ADDR_LE_DEV_RANDOM))
3352 return 0;
3353
3354 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &rpa);
3355 if (err < 0) {
3356 BT_ERR("%s failed to generate new RPA", hdev->name);
3357 return err;
3358 }
3359
3360 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &rpa);
3361
3362 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3363 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3364
3365 return 0;
3366 }
3367
3368 /* If forcing static address is in use or there is no public
3369 * address, use the static address as the random address (but skip
3370 * the HCI command if the current random address is already the
3371 * static one).
3372 */
3373 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3374 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3375 *own_addr_type = ADDR_LE_DEV_RANDOM;
3376 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3377 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3378 &hdev->static_addr);
3379 return 0;
3380 }
3381
3382 /* Neither privacy nor static address is being used so use a
3383 * public address.
3384 */
3385 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3386
3387 return 0;
3388 }
3389
3390 /* Alloc HCI device */
3391 struct hci_dev *hci_alloc_dev(void)
3392 {
3393 struct hci_dev *hdev;
3394
3395 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3396 if (!hdev)
3397 return NULL;
3398
3399 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3400 hdev->esco_type = (ESCO_HV1);
3401 hdev->link_mode = (HCI_LM_ACCEPT);
3402 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3403 hdev->io_capability = 0x03; /* No Input No Output */
3404 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3405 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3406
3407 hdev->sniff_max_interval = 800;
3408 hdev->sniff_min_interval = 80;
3409
3410 hdev->le_adv_channel_map = 0x07;
3411 hdev->le_scan_interval = 0x0060;
3412 hdev->le_scan_window = 0x0030;
3413 hdev->le_conn_min_interval = 0x0028;
3414 hdev->le_conn_max_interval = 0x0038;
3415
3416 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3417
3418 mutex_init(&hdev->lock);
3419 mutex_init(&hdev->req_lock);
3420
3421 INIT_LIST_HEAD(&hdev->mgmt_pending);
3422 INIT_LIST_HEAD(&hdev->blacklist);
3423 INIT_LIST_HEAD(&hdev->uuids);
3424 INIT_LIST_HEAD(&hdev->link_keys);
3425 INIT_LIST_HEAD(&hdev->long_term_keys);
3426 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3427 INIT_LIST_HEAD(&hdev->remote_oob_data);
3428 INIT_LIST_HEAD(&hdev->le_conn_params);
3429 INIT_LIST_HEAD(&hdev->conn_hash.list);
3430
3431 INIT_WORK(&hdev->rx_work, hci_rx_work);
3432 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3433 INIT_WORK(&hdev->tx_work, hci_tx_work);
3434 INIT_WORK(&hdev->power_on, hci_power_on);
3435
3436 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3437 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3438 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3439
3440 skb_queue_head_init(&hdev->rx_q);
3441 skb_queue_head_init(&hdev->cmd_q);
3442 skb_queue_head_init(&hdev->raw_q);
3443
3444 init_waitqueue_head(&hdev->req_wait_q);
3445
3446 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3447
3448 hci_init_sysfs(hdev);
3449 discovery_init(hdev);
3450
3451 return hdev;
3452 }
3453 EXPORT_SYMBOL(hci_alloc_dev);
3454
3455 /* Free HCI device */
3456 void hci_free_dev(struct hci_dev *hdev)
3457 {
3458 /* will free via device release */
3459 put_device(&hdev->dev);
3460 }
3461 EXPORT_SYMBOL(hci_free_dev);
3462
3463 /* Register HCI device */
3464 int hci_register_dev(struct hci_dev *hdev)
3465 {
3466 int id, error;
3467
3468 if (!hdev->open || !hdev->close)
3469 return -EINVAL;
3470
3471 /* Do not allow HCI_AMP devices to register at index 0,
3472 * so the index can be used as the AMP controller ID.
3473 */
3474 switch (hdev->dev_type) {
3475 case HCI_BREDR:
3476 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3477 break;
3478 case HCI_AMP:
3479 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3480 break;
3481 default:
3482 return -EINVAL;
3483 }
3484
3485 if (id < 0)
3486 return id;
3487
3488 sprintf(hdev->name, "hci%d", id);
3489 hdev->id = id;
3490
3491 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3492
3493 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3494 WQ_MEM_RECLAIM, 1, hdev->name);
3495 if (!hdev->workqueue) {
3496 error = -ENOMEM;
3497 goto err;
3498 }
3499
3500 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3501 WQ_MEM_RECLAIM, 1, hdev->name);
3502 if (!hdev->req_workqueue) {
3503 destroy_workqueue(hdev->workqueue);
3504 error = -ENOMEM;
3505 goto err;
3506 }
3507
3508 if (!IS_ERR_OR_NULL(bt_debugfs))
3509 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3510
3511 dev_set_name(&hdev->dev, "%s", hdev->name);
3512
3513 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3514 CRYPTO_ALG_ASYNC);
3515 if (IS_ERR(hdev->tfm_aes)) {
3516 BT_ERR("Unable to create crypto context");
3517 error = PTR_ERR(hdev->tfm_aes);
3518 hdev->tfm_aes = NULL;
3519 goto err_wqueue;
3520 }
3521
3522 error = device_add(&hdev->dev);
3523 if (error < 0)
3524 goto err_tfm;
3525
3526 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3527 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3528 hdev);
3529 if (hdev->rfkill) {
3530 if (rfkill_register(hdev->rfkill) < 0) {
3531 rfkill_destroy(hdev->rfkill);
3532 hdev->rfkill = NULL;
3533 }
3534 }
3535
3536 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3537 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3538
3539 set_bit(HCI_SETUP, &hdev->dev_flags);
3540 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3541
3542 if (hdev->dev_type == HCI_BREDR) {
3543 /* Assume BR/EDR support until proven otherwise (such as
3544 * through reading supported features during init).
3545 */
3546 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3547 }
3548
3549 write_lock(&hci_dev_list_lock);
3550 list_add(&hdev->list, &hci_dev_list);
3551 write_unlock(&hci_dev_list_lock);
3552
3553 hci_notify(hdev, HCI_DEV_REG);
3554 hci_dev_hold(hdev);
3555
3556 queue_work(hdev->req_workqueue, &hdev->power_on);
3557
3558 return id;
3559
3560 err_tfm:
3561 crypto_free_blkcipher(hdev->tfm_aes);
3562 err_wqueue:
3563 destroy_workqueue(hdev->workqueue);
3564 destroy_workqueue(hdev->req_workqueue);
3565 err:
3566 ida_simple_remove(&hci_index_ida, hdev->id);
3567
3568 return error;
3569 }
3570 EXPORT_SYMBOL(hci_register_dev);
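/* A hedged sketch of how a transport driver typically uses this API.
 * my_open/my_close/my_flush/my_send and my_priv are hypothetical driver
 * symbols, not part of this file; the callback signatures follow the
 * struct hci_dev hooks invoked above:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;		int (*)(struct hci_dev *)
 *	hdev->close = my_close;		int (*)(struct hci_dev *)
 *	hdev->flush = my_flush;		optional
 *	hdev->send  = my_send;		int (*)(struct hci_dev *, struct sk_buff *)
 *	hci_set_drvdata(hdev, my_priv);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */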
3571
3572 /* Unregister HCI device */
3573 void hci_unregister_dev(struct hci_dev *hdev)
3574 {
3575 int i, id;
3576
3577 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3578
3579 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3580
3581 id = hdev->id;
3582
3583 write_lock(&hci_dev_list_lock);
3584 list_del(&hdev->list);
3585 write_unlock(&hci_dev_list_lock);
3586
3587 hci_dev_do_close(hdev);
3588
3589 for (i = 0; i < NUM_REASSEMBLY; i++)
3590 kfree_skb(hdev->reassembly[i]);
3591
3592 cancel_work_sync(&hdev->power_on);
3593
3594 if (!test_bit(HCI_INIT, &hdev->flags) &&
3595 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3596 hci_dev_lock(hdev);
3597 mgmt_index_removed(hdev);
3598 hci_dev_unlock(hdev);
3599 }
3600
3601 /* mgmt_index_removed should take care of emptying the
3602 * pending list */
3603 BUG_ON(!list_empty(&hdev->mgmt_pending));
3604
3605 hci_notify(hdev, HCI_DEV_UNREG);
3606
3607 if (hdev->rfkill) {
3608 rfkill_unregister(hdev->rfkill);
3609 rfkill_destroy(hdev->rfkill);
3610 }
3611
3612 if (hdev->tfm_aes)
3613 crypto_free_blkcipher(hdev->tfm_aes);
3614
3615 device_del(&hdev->dev);
3616
3617 debugfs_remove_recursive(hdev->debugfs);
3618
3619 destroy_workqueue(hdev->workqueue);
3620 destroy_workqueue(hdev->req_workqueue);
3621
3622 hci_dev_lock(hdev);
3623 hci_blacklist_clear(hdev);
3624 hci_uuids_clear(hdev);
3625 hci_link_keys_clear(hdev);
3626 hci_smp_ltks_clear(hdev);
3627 hci_smp_irks_clear(hdev);
3628 hci_remote_oob_data_clear(hdev);
3629 hci_conn_params_clear(hdev);
3630 hci_dev_unlock(hdev);
3631
3632 hci_dev_put(hdev);
3633
3634 ida_simple_remove(&hci_index_ida, id);
3635 }
3636 EXPORT_SYMBOL(hci_unregister_dev);
3637
3638 /* Suspend HCI device */
3639 int hci_suspend_dev(struct hci_dev *hdev)
3640 {
3641 hci_notify(hdev, HCI_DEV_SUSPEND);
3642 return 0;
3643 }
3644 EXPORT_SYMBOL(hci_suspend_dev);
3645
3646 /* Resume HCI device */
3647 int hci_resume_dev(struct hci_dev *hdev)
3648 {
3649 hci_notify(hdev, HCI_DEV_RESUME);
3650 return 0;
3651 }
3652 EXPORT_SYMBOL(hci_resume_dev);
3653
3654 /* Receive frame from HCI drivers */
3655 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3656 {
3657 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3658 && !test_bit(HCI_INIT, &hdev->flags))) {
3659 kfree_skb(skb);
3660 return -ENXIO;
3661 }
3662
3663 /* Incoming skb */
3664 bt_cb(skb)->incoming = 1;
3665
3666 /* Time stamp */
3667 __net_timestamp(skb);
3668
3669 skb_queue_tail(&hdev->rx_q, skb);
3670 queue_work(hdev->workqueue, &hdev->rx_work);
3671
3672 return 0;
3673 }
3674 EXPORT_SYMBOL(hci_recv_frame);
3675
3676 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3677 int count, __u8 index)
3678 {
3679 int len = 0;
3680 int hlen = 0;
3681 int remain = count;
3682 struct sk_buff *skb;
3683 struct bt_skb_cb *scb;
3684
3685 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3686 index >= NUM_REASSEMBLY)
3687 return -EILSEQ;
3688
3689 skb = hdev->reassembly[index];
3690
3691 if (!skb) {
3692 switch (type) {
3693 case HCI_ACLDATA_PKT:
3694 len = HCI_MAX_FRAME_SIZE;
3695 hlen = HCI_ACL_HDR_SIZE;
3696 break;
3697 case HCI_EVENT_PKT:
3698 len = HCI_MAX_EVENT_SIZE;
3699 hlen = HCI_EVENT_HDR_SIZE;
3700 break;
3701 case HCI_SCODATA_PKT:
3702 len = HCI_MAX_SCO_SIZE;
3703 hlen = HCI_SCO_HDR_SIZE;
3704 break;
3705 }
3706
3707 skb = bt_skb_alloc(len, GFP_ATOMIC);
3708 if (!skb)
3709 return -ENOMEM;
3710
3711 scb = (void *) skb->cb;
3712 scb->expect = hlen;
3713 scb->pkt_type = type;
3714
3715 hdev->reassembly[index] = skb;
3716 }
3717
3718 while (count) {
3719 scb = (void *) skb->cb;
3720 len = min_t(uint, scb->expect, count);
3721
3722 memcpy(skb_put(skb, len), data, len);
3723
3724 count -= len;
3725 data += len;
3726 scb->expect -= len;
3727 remain = count;
3728
3729 switch (type) {
3730 case HCI_EVENT_PKT:
3731 if (skb->len == HCI_EVENT_HDR_SIZE) {
3732 struct hci_event_hdr *h = hci_event_hdr(skb);
3733 scb->expect = h->plen;
3734
3735 if (skb_tailroom(skb) < scb->expect) {
3736 kfree_skb(skb);
3737 hdev->reassembly[index] = NULL;
3738 return -ENOMEM;
3739 }
3740 }
3741 break;
3742
3743 case HCI_ACLDATA_PKT:
3744 if (skb->len == HCI_ACL_HDR_SIZE) {
3745 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3746 scb->expect = __le16_to_cpu(h->dlen);
3747
3748 if (skb_tailroom(skb) < scb->expect) {
3749 kfree_skb(skb);
3750 hdev->reassembly[index] = NULL;
3751 return -ENOMEM;
3752 }
3753 }
3754 break;
3755
3756 case HCI_SCODATA_PKT:
3757 if (skb->len == HCI_SCO_HDR_SIZE) {
3758 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3759 scb->expect = h->dlen;
3760
3761 if (skb_tailroom(skb) < scb->expect) {
3762 kfree_skb(skb);
3763 hdev->reassembly[index] = NULL;
3764 return -ENOMEM;
3765 }
3766 }
3767 break;
3768 }
3769
3770 if (scb->expect == 0) {
3771 /* Complete frame */
3772
3773 bt_cb(skb)->pkt_type = type;
3774 hci_recv_frame(hdev, skb);
3775
3776 hdev->reassembly[index] = NULL;
3777 return remain;
3778 }
3779 }
3780
3781 return remain;
3782 }
3783
3784 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3785 {
3786 int rem = 0;
3787
3788 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3789 return -EILSEQ;
3790
3791 while (count) {
3792 rem = hci_reassembly(hdev, type, data, count, type - 1);
3793 if (rem < 0)
3794 return rem;
3795
3796 data += (count - rem);
3797 count = rem;
3798 }
3799
3800 return rem;
3801 }
3802 EXPORT_SYMBOL(hci_recv_fragment);
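/* A hedged sketch of how a driver feeds received bytes through this
 * helper. With packet-framed transports (e.g. a USB interrupt endpoint
 * carrying events) the packet type is known per transfer, so the whole
 * buffer is handed over at once and partial packets are reassembled
 * across calls:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		... framing error or allocation failure ...
 *
 * Byte-stream transports (e.g. UART H:4), where each packet is prefixed
 * with its type indicator, use hci_recv_stream_fragment() below instead.
 */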
3803
3804 #define STREAM_REASSEMBLY 0
3805
3806 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3807 {
3808 int type;
3809 int rem = 0;
3810
3811 while (count) {
3812 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3813
3814 if (!skb) {
3815 struct { char type; } *pkt;
3816
3817 /* Start of the frame */
3818 pkt = data;
3819 type = pkt->type;
3820
3821 data++;
3822 count--;
3823 } else
3824 type = bt_cb(skb)->pkt_type;
3825
3826 rem = hci_reassembly(hdev, type, data, count,
3827 STREAM_REASSEMBLY);
3828 if (rem < 0)
3829 return rem;
3830
3831 data += (count - rem);
3832 count = rem;
3833 }
3834
3835 return rem;
3836 }
3837 EXPORT_SYMBOL(hci_recv_stream_fragment);
3838
3839 /* ---- Interface to upper protocols ---- */
3840
3841 int hci_register_cb(struct hci_cb *cb)
3842 {
3843 BT_DBG("%p name %s", cb, cb->name);
3844
3845 write_lock(&hci_cb_list_lock);
3846 list_add(&cb->list, &hci_cb_list);
3847 write_unlock(&hci_cb_list_lock);
3848
3849 return 0;
3850 }
3851 EXPORT_SYMBOL(hci_register_cb);
3852
3853 int hci_unregister_cb(struct hci_cb *cb)
3854 {
3855 BT_DBG("%p name %s", cb, cb->name);
3856
3857 write_lock(&hci_cb_list_lock);
3858 list_del(&cb->list);
3859 write_unlock(&hci_cb_list_lock);
3860
3861 return 0;
3862 }
3863 EXPORT_SYMBOL(hci_unregister_cb);
3864
3865 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3866 {
3867 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3868
3869 /* Time stamp */
3870 __net_timestamp(skb);
3871
3872 /* Send copy to monitor */
3873 hci_send_to_monitor(hdev, skb);
3874
3875 if (atomic_read(&hdev->promisc)) {
3876 /* Send copy to the sockets */
3877 hci_send_to_sock(hdev, skb);
3878 }
3879
3880 /* Get rid of skb owner, prior to sending to the driver. */
3881 skb_orphan(skb);
3882
3883 if (hdev->send(hdev, skb) < 0)
3884 BT_ERR("%s sending frame failed", hdev->name);
3885 }
3886
3887 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3888 {
3889 skb_queue_head_init(&req->cmd_q);
3890 req->hdev = hdev;
3891 req->err = 0;
3892 }
3893
3894 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3895 {
3896 struct hci_dev *hdev = req->hdev;
3897 struct sk_buff *skb;
3898 unsigned long flags;
3899
3900 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3901
3902 /* If an error occurred during request building, remove all HCI
3903 * commands queued on the HCI request queue.
3904 */
3905 if (req->err) {
3906 skb_queue_purge(&req->cmd_q);
3907 return req->err;
3908 }
3909
3910 /* Do not allow empty requests */
3911 if (skb_queue_empty(&req->cmd_q))
3912 return -ENODATA;
3913
3914 skb = skb_peek_tail(&req->cmd_q);
3915 bt_cb(skb)->req.complete = complete;
3916
3917 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3918 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3919 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3920
3921 queue_work(hdev->workqueue, &hdev->cmd_work);
3922
3923 return 0;
3924 }
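/* A hedged sketch of the build-and-run pattern used by callers of this
 * request API elsewhere in the stack. my_complete is a hypothetical
 * callback; its signature matches hci_req_complete_t as used above:
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		if (status)
 *			BT_ERR("request failed: 0x%2.2x", status);
 *	}
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete);
 */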
3925
3926 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3927 u32 plen, const void *param)
3928 {
3929 int len = HCI_COMMAND_HDR_SIZE + plen;
3930 struct hci_command_hdr *hdr;
3931 struct sk_buff *skb;
3932
3933 skb = bt_skb_alloc(len, GFP_ATOMIC);
3934 if (!skb)
3935 return NULL;
3936
3937 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3938 hdr->opcode = cpu_to_le16(opcode);
3939 hdr->plen = plen;
3940
3941 if (plen)
3942 memcpy(skb_put(skb, plen), param, plen);
3943
3944 BT_DBG("skb len %d", skb->len);
3945
3946 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3947
3948 return skb;
3949 }
3950
3951 /* Send HCI command */
3952 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3953 const void *param)
3954 {
3955 struct sk_buff *skb;
3956
3957 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3958
3959 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3960 if (!skb) {
3961 BT_ERR("%s no memory for command", hdev->name);
3962 return -ENOMEM;
3963 }
3964
3965 /* Stand-alone HCI commands must be flagged as
3966 * single-command requests.
3967 */
3968 bt_cb(skb)->req.start = true;
3969
3970 skb_queue_tail(&hdev->cmd_q, skb);
3971 queue_work(hdev->workqueue, &hdev->cmd_work);
3972
3973 return 0;
3974 }
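/* A hedged usage sketch: queue a single stand-alone command whose
 * completion is handled asynchronously in hci_event.c, for example:
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */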
3975
3976 /* Queue a command to an asynchronous HCI request */
3977 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3978 const void *param, u8 event)
3979 {
3980 struct hci_dev *hdev = req->hdev;
3981 struct sk_buff *skb;
3982
3983 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3984
3985 /* If an error occurred during request building, there is no point in
3986 * queueing the HCI command. We can simply return.
3987 */
3988 if (req->err)
3989 return;
3990
3991 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3992 if (!skb) {
3993 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3994 hdev->name, opcode);
3995 req->err = -ENOMEM;
3996 return;
3997 }
3998
3999 if (skb_queue_empty(&req->cmd_q))
4000 bt_cb(skb)->req.start = true;
4001
4002 bt_cb(skb)->req.event = event;
4003
4004 skb_queue_tail(&req->cmd_q, skb);
4005 }
4006
4007 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4008 const void *param)
4009 {
4010 hci_req_add_ev(req, opcode, plen, param, 0);
4011 }
4012
4013 /* Get data from the previously sent command */
4014 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4015 {
4016 struct hci_command_hdr *hdr;
4017
4018 if (!hdev->sent_cmd)
4019 return NULL;
4020
4021 hdr = (void *) hdev->sent_cmd->data;
4022
4023 if (hdr->opcode != cpu_to_le16(opcode))
4024 return NULL;
4025
4026 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4027
4028 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4029 }
4030
4031 /* Send ACL data */
4032 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4033 {
4034 struct hci_acl_hdr *hdr;
4035 int len = skb->len;
4036
4037 skb_push(skb, HCI_ACL_HDR_SIZE);
4038 skb_reset_transport_header(skb);
4039 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4040 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4041 hdr->dlen = cpu_to_le16(len);
4042 }
4043
4044 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4045 struct sk_buff *skb, __u16 flags)
4046 {
4047 struct hci_conn *conn = chan->conn;
4048 struct hci_dev *hdev = conn->hdev;
4049 struct sk_buff *list;
4050
4051 skb->len = skb_headlen(skb);
4052 skb->data_len = 0;
4053
4054 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4055
4056 switch (hdev->dev_type) {
4057 case HCI_BREDR:
4058 hci_add_acl_hdr(skb, conn->handle, flags);
4059 break;
4060 case HCI_AMP:
4061 hci_add_acl_hdr(skb, chan->handle, flags);
4062 break;
4063 default:
4064 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4065 return;
4066 }
4067
4068 list = skb_shinfo(skb)->frag_list;
4069 if (!list) {
4070 /* Non fragmented */
4071 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4072
4073 skb_queue_tail(queue, skb);
4074 } else {
4075 /* Fragmented */
4076 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4077
4078 skb_shinfo(skb)->frag_list = NULL;
4079
4080 /* Queue all fragments atomically */
4081 spin_lock(&queue->lock);
4082
4083 __skb_queue_tail(queue, skb);
4084
4085 flags &= ~ACL_START;
4086 flags |= ACL_CONT;
4087 do {
4088 skb = list; list = list->next;
4089
4090 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4091 hci_add_acl_hdr(skb, conn->handle, flags);
4092
4093 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4094
4095 __skb_queue_tail(queue, skb);
4096 } while (list);
4097
4098 spin_unlock(&queue->lock);
4099 }
4100 }
4101
4102 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4103 {
4104 struct hci_dev *hdev = chan->conn->hdev;
4105
4106 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4107
4108 hci_queue_acl(chan, &chan->data_q, skb, flags);
4109
4110 queue_work(hdev->workqueue, &hdev->tx_work);
4111 }
4112
4113 /* Send SCO data */
4114 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4115 {
4116 struct hci_dev *hdev = conn->hdev;
4117 struct hci_sco_hdr hdr;
4118
4119 BT_DBG("%s len %d", hdev->name, skb->len);
4120
4121 hdr.handle = cpu_to_le16(conn->handle);
4122 hdr.dlen = skb->len;
4123
4124 skb_push(skb, HCI_SCO_HDR_SIZE);
4125 skb_reset_transport_header(skb);
4126 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4127
4128 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4129
4130 skb_queue_tail(&conn->data_q, skb);
4131 queue_work(hdev->workqueue, &hdev->tx_work);
4132 }
4133
4134 /* ---- HCI TX task (outgoing data) ---- */
4135
4136 /* HCI Connection scheduler */
4137 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4138 int *quote)
4139 {
4140 struct hci_conn_hash *h = &hdev->conn_hash;
4141 struct hci_conn *conn = NULL, *c;
4142 unsigned int num = 0, min = ~0;
4143
4144 /* We don't have to lock the device here. Connections are always
4145 * added and removed with the TX task disabled. */
4146
4147 rcu_read_lock();
4148
4149 list_for_each_entry_rcu(c, &h->list, list) {
4150 if (c->type != type || skb_queue_empty(&c->data_q))
4151 continue;
4152
4153 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4154 continue;
4155
4156 num++;
4157
4158 if (c->sent < min) {
4159 min = c->sent;
4160 conn = c;
4161 }
4162
4163 if (hci_conn_num(hdev, type) == num)
4164 break;
4165 }
4166
4167 rcu_read_unlock();
4168
4169 if (conn) {
4170 int cnt, q;
4171
4172 switch (conn->type) {
4173 case ACL_LINK:
4174 cnt = hdev->acl_cnt;
4175 break;
4176 case SCO_LINK:
4177 case ESCO_LINK:
4178 cnt = hdev->sco_cnt;
4179 break;
4180 case LE_LINK:
4181 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4182 break;
4183 default:
4184 cnt = 0;
4185 BT_ERR("Unknown link type");
4186 }
4187
4188 q = cnt / num;
4189 *quote = q ? q : 1;
4190 } else
4191 *quote = 0;
4192
4193 BT_DBG("conn %p quote %d", conn, *quote);
4194 return conn;
4195 }
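/* Worked example of the quota computed above: with hdev->acl_cnt == 8
 * free ACL slots shared by num == 3 active connections, the least
 * recently served connection is granted q = 8 / 3 = 2 packets per
 * scheduling pass (and never less than 1, so a single free slot still
 * makes progress).
 */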
4196
4197 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4198 {
4199 struct hci_conn_hash *h = &hdev->conn_hash;
4200 struct hci_conn *c;
4201
4202 BT_ERR("%s link tx timeout", hdev->name);
4203
4204 rcu_read_lock();
4205
4206 /* Kill stalled connections */
4207 list_for_each_entry_rcu(c, &h->list, list) {
4208 if (c->type == type && c->sent) {
4209 BT_ERR("%s killing stalled connection %pMR",
4210 hdev->name, &c->dst);
4211 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4212 }
4213 }
4214
4215 rcu_read_unlock();
4216 }
4217
4218 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4219 int *quote)
4220 {
4221 struct hci_conn_hash *h = &hdev->conn_hash;
4222 struct hci_chan *chan = NULL;
4223 unsigned int num = 0, min = ~0, cur_prio = 0;
4224 struct hci_conn *conn;
4225 int cnt, q, conn_num = 0;
4226
4227 BT_DBG("%s", hdev->name);
4228
4229 rcu_read_lock();
4230
4231 list_for_each_entry_rcu(conn, &h->list, list) {
4232 struct hci_chan *tmp;
4233
4234 if (conn->type != type)
4235 continue;
4236
4237 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4238 continue;
4239
4240 conn_num++;
4241
4242 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4243 struct sk_buff *skb;
4244
4245 if (skb_queue_empty(&tmp->data_q))
4246 continue;
4247
4248 skb = skb_peek(&tmp->data_q);
4249 if (skb->priority < cur_prio)
4250 continue;
4251
4252 if (skb->priority > cur_prio) {
4253 num = 0;
4254 min = ~0;
4255 cur_prio = skb->priority;
4256 }
4257
4258 num++;
4259
4260 if (conn->sent < min) {
4261 min = conn->sent;
4262 chan = tmp;
4263 }
4264 }
4265
4266 if (hci_conn_num(hdev, type) == conn_num)
4267 break;
4268 }
4269
4270 rcu_read_unlock();
4271
4272 if (!chan)
4273 return NULL;
4274
4275 switch (chan->conn->type) {
4276 case ACL_LINK:
4277 cnt = hdev->acl_cnt;
4278 break;
4279 case AMP_LINK:
4280 cnt = hdev->block_cnt;
4281 break;
4282 case SCO_LINK:
4283 case ESCO_LINK:
4284 cnt = hdev->sco_cnt;
4285 break;
4286 case LE_LINK:
4287 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4288 break;
4289 default:
4290 cnt = 0;
4291 BT_ERR("Unknown link type");
4292 }
4293
4294 q = cnt / num;
4295 *quote = q ? q : 1;
4296 BT_DBG("chan %p quote %d", chan, *quote);
4297 return chan;
4298 }
4299
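/* Called after a scheduling round in which packets were sent.  Channels
 * that got to transmit have their ->sent counter cleared; channels that
 * were starved (nothing sent but data still queued) get the priority of
 * their head skb promoted to HCI_PRIO_MAX - 1 so they win the next
 * round.
 */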
4300 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4301 {
4302 struct hci_conn_hash *h = &hdev->conn_hash;
4303 struct hci_conn *conn;
4304 int num = 0;
4305
4306 BT_DBG("%s", hdev->name);
4307
4308 rcu_read_lock();
4309
4310 list_for_each_entry_rcu(conn, &h->list, list) {
4311 struct hci_chan *chan;
4312
4313 if (conn->type != type)
4314 continue;
4315
4316 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4317 continue;
4318
4319 num++;
4320
4321 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4322 struct sk_buff *skb;
4323
4324 if (chan->sent) {
4325 chan->sent = 0;
4326 continue;
4327 }
4328
4329 if (skb_queue_empty(&chan->data_q))
4330 continue;
4331
4332 skb = skb_peek(&chan->data_q);
4333 if (skb->priority >= HCI_PRIO_MAX - 1)
4334 continue;
4335
4336 skb->priority = HCI_PRIO_MAX - 1;
4337
4338 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4339 skb->priority);
4340 }
4341
4342 if (hci_conn_num(hdev, type) == num)
4343 break;
4344 }
4345
4346 rcu_read_unlock();
4347
4348 }
4349
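/* Number of controller data blocks consumed by an ACL packet when
 * block-based flow control is in use: the payload length (skb->len
 * minus the ACL header) rounded up to whole blocks.  For example, a
 * 339-byte payload with 64-byte blocks occupies 6 blocks.
 */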
4350 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4351 {
4352 /* Calculate count of blocks used by this packet */
4353 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4354 }
4355
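/* If no controller buffers are free and nothing has been transmitted
 * for HCI_ACL_TX_TIMEOUT, treat the ACL links as stalled.  Skipped when
 * the device is in raw mode.
 */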
4356 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4357 {
4358 if (!test_bit(HCI_RAW, &hdev->flags)) {
4359 /* ACL tx timeout must be longer than maximum
4360 * link supervision timeout (40.9 seconds) */
4361 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4362 HCI_ACL_TX_TIMEOUT))
4363 hci_link_tx_to(hdev, ACL_LINK);
4364 }
4365 }
4366
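/* Packet-based ACL scheduling: while free ACL buffers remain, pick a
 * channel and send up to its quote of packets, stopping early if a
 * lower-priority packet reaches the head of the queue.  Each frame
 * consumes one slot from hdev->acl_cnt.
 */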
4367 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4368 {
4369 unsigned int cnt = hdev->acl_cnt;
4370 struct hci_chan *chan;
4371 struct sk_buff *skb;
4372 int quote;
4373
4374 __check_timeout(hdev, cnt);
4375
4376 while (hdev->acl_cnt &&
4377 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4378 u32 priority = (skb_peek(&chan->data_q))->priority;
4379 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4380 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4381 skb->len, skb->priority);
4382
4383 /* Stop if priority has changed */
4384 if (skb->priority < priority)
4385 break;
4386
4387 skb = skb_dequeue(&chan->data_q);
4388
4389 hci_conn_enter_active_mode(chan->conn,
4390 bt_cb(skb)->force_active);
4391
4392 hci_send_frame(hdev, skb);
4393 hdev->acl_last_tx = jiffies;
4394
4395 hdev->acl_cnt--;
4396 chan->sent++;
4397 chan->conn->sent++;
4398 }
4399 }
4400
4401 if (cnt != hdev->acl_cnt)
4402 hci_prio_recalculate(hdev, ACL_LINK);
4403 }
4404
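/* Block-based ACL scheduling: as above, but each frame consumes
 * __get_blocks() data blocks from hdev->block_cnt and from the quote.
 * AMP controllers schedule AMP_LINK traffic here, BR/EDR controllers
 * ACL_LINK.
 */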
4405 static void hci_sched_acl_blk(struct hci_dev *hdev)
4406 {
4407 unsigned int cnt = hdev->block_cnt;
4408 struct hci_chan *chan;
4409 struct sk_buff *skb;
4410 int quote;
4411 u8 type;
4412
4413 __check_timeout(hdev, cnt);
4414
4415 BT_DBG("%s", hdev->name);
4416
4417 if (hdev->dev_type == HCI_AMP)
4418 type = AMP_LINK;
4419 else
4420 type = ACL_LINK;
4421
4422 while (hdev->block_cnt > 0 &&
4423 (chan = hci_chan_sent(hdev, type, &quote))) {
4424 u32 priority = (skb_peek(&chan->data_q))->priority;
4425 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4426 int blocks;
4427
4428 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4429 skb->len, skb->priority);
4430
4431 /* Stop if priority has changed */
4432 if (skb->priority < priority)
4433 break;
4434
4435 skb = skb_dequeue(&chan->data_q);
4436
4437 blocks = __get_blocks(hdev, skb);
4438 if (blocks > hdev->block_cnt)
4439 return;
4440
4441 hci_conn_enter_active_mode(chan->conn,
4442 bt_cb(skb)->force_active);
4443
4444 hci_send_frame(hdev, skb);
4445 hdev->acl_last_tx = jiffies;
4446
4447 hdev->block_cnt -= blocks;
4448 quote -= blocks;
4449
4450 chan->sent += blocks;
4451 chan->conn->sent += blocks;
4452 }
4453 }
4454
4455 if (cnt != hdev->block_cnt)
4456 hci_prio_recalculate(hdev, type);
4457 }
4458
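/* Dispatch ACL scheduling according to the controller's flow control
 * mode (packet-based or block-based).
 */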
4459 static void hci_sched_acl(struct hci_dev *hdev)
4460 {
4461 BT_DBG("%s", hdev->name);
4462
4463 /* Nothing to schedule if a BR/EDR controller has no ACL links */
4464 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4465 return;
4466
4467 /* Nothing to schedule if an AMP controller has no AMP links */
4468 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4469 return;
4470
4471 switch (hdev->flow_ctl_mode) {
4472 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4473 hci_sched_acl_pkt(hdev);
4474 break;
4475
4476 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4477 hci_sched_acl_blk(hdev);
4478 break;
4479 }
4480 }
4481
4482 /* Schedule SCO */
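/* SCO and eSCO frames are not priority-scheduled; the plain connection
 * scheduler is used and whole frames are sent, up to the connection's
 * quote per round.
 */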
4483 static void hci_sched_sco(struct hci_dev *hdev)
4484 {
4485 struct hci_conn *conn;
4486 struct sk_buff *skb;
4487 int quote;
4488
4489 BT_DBG("%s", hdev->name);
4490
4491 if (!hci_conn_num(hdev, SCO_LINK))
4492 return;
4493
4494 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4495 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4496 BT_DBG("skb %p len %d", skb, skb->len);
4497 hci_send_frame(hdev, skb);
4498
4499 conn->sent++;
4500 if (conn->sent == ~0)
4501 conn->sent = 0;
4502 }
4503 }
4504 }
4505
4506 static void hci_sched_esco(struct hci_dev *hdev)
4507 {
4508 struct hci_conn *conn;
4509 struct sk_buff *skb;
4510 int quote;
4511
4512 BT_DBG("%s", hdev->name);
4513
4514 if (!hci_conn_num(hdev, ESCO_LINK))
4515 return;
4516
4517 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4518 &quote))) {
4519 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4520 BT_DBG("skb %p len %d", skb, skb->len);
4521 hci_send_frame(hdev, skb);
4522
4523 conn->sent++;
4524 if (conn->sent == ~0)
4525 conn->sent = 0;
4526 }
4527 }
4528 }
4529
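/* LE scheduling works like packet-based ACL scheduling.  Controllers
 * without a dedicated LE buffer pool (le_pkts == 0) share the ACL
 * buffers, so the consumed count is written back to acl_cnt instead of
 * le_cnt.
 */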
4530 static void hci_sched_le(struct hci_dev *hdev)
4531 {
4532 struct hci_chan *chan;
4533 struct sk_buff *skb;
4534 int quote, cnt, tmp;
4535
4536 BT_DBG("%s", hdev->name);
4537
4538 if (!hci_conn_num(hdev, LE_LINK))
4539 return;
4540
4541 if (!test_bit(HCI_RAW, &hdev->flags)) {
4542 /* LE tx timeout must be longer than maximum
4543 * link supervision timeout (40.9 seconds) */
4544 if (!hdev->le_cnt && hdev->le_pkts &&
4545 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4546 hci_link_tx_to(hdev, LE_LINK);
4547 }
4548
4549 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4550 tmp = cnt;
4551 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4552 u32 priority = (skb_peek(&chan->data_q))->priority;
4553 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4554 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4555 skb->len, skb->priority);
4556
4557 /* Stop if priority has changed */
4558 if (skb->priority < priority)
4559 break;
4560
4561 skb = skb_dequeue(&chan->data_q);
4562
4563 hci_send_frame(hdev, skb);
4564 hdev->le_last_tx = jiffies;
4565
4566 cnt--;
4567 chan->sent++;
4568 chan->conn->sent++;
4569 }
4570 }
4571
4572 if (hdev->le_pkts)
4573 hdev->le_cnt = cnt;
4574 else
4575 hdev->acl_cnt = cnt;
4576
4577 if (cnt != tmp)
4578 hci_prio_recalculate(hdev, LE_LINK);
4579 }
4580
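/* TX work: runs the per-link-type schedulers (unless the device is in
 * user channel mode) and then flushes any raw packets queued on
 * hdev->raw_q straight to the driver.
 */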
4581 static void hci_tx_work(struct work_struct *work)
4582 {
4583 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4584 struct sk_buff *skb;
4585
4586 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4587 hdev->sco_cnt, hdev->le_cnt);
4588
4589 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4590 /* Schedule queues and send pending frames to the HCI driver */
4591 hci_sched_acl(hdev);
4592 hci_sched_sco(hdev);
4593 hci_sched_esco(hdev);
4594 hci_sched_le(hdev);
4595 }
4596
4597 /* Send any queued raw (unknown type) packets */
4598 while ((skb = skb_dequeue(&hdev->raw_q)))
4599 hci_send_frame(hdev, skb);
4600 }
4601
4602 /* ----- HCI RX task (incoming data processing) ----- */
4603
4604 /* ACL data packet */
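/* Parse the handle and flags from the ACL header, look up the owning
 * connection and hand the skb to L2CAP.  Packets for unknown handles
 * are logged and dropped.
 */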
4605 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4606 {
4607 struct hci_acl_hdr *hdr = (void *) skb->data;
4608 struct hci_conn *conn;
4609 __u16 handle, flags;
4610
4611 skb_pull(skb, HCI_ACL_HDR_SIZE);
4612
4613 handle = __le16_to_cpu(hdr->handle);
4614 flags = hci_flags(handle);
4615 handle = hci_handle(handle);
4616
4617 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4618 handle, flags);
4619
4620 hdev->stat.acl_rx++;
4621
4622 hci_dev_lock(hdev);
4623 conn = hci_conn_hash_lookup_handle(hdev, handle);
4624 hci_dev_unlock(hdev);
4625
4626 if (conn) {
4627 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4628
4629 /* Send to upper protocol */
4630 l2cap_recv_acldata(conn, skb, flags);
4631 return;
4632 } else {
4633 BT_ERR("%s ACL packet for unknown connection handle %d",
4634 hdev->name, handle);
4635 }
4636
4637 kfree_skb(skb);
4638 }
4639
4640 /* SCO data packet */
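/* Same as above for SCO: look up the connection by handle and pass the
 * skb to the SCO layer, dropping packets for unknown handles.
 */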
4641 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4642 {
4643 struct hci_sco_hdr *hdr = (void *) skb->data;
4644 struct hci_conn *conn;
4645 __u16 handle;
4646
4647 skb_pull(skb, HCI_SCO_HDR_SIZE);
4648
4649 handle = __le16_to_cpu(hdr->handle);
4650
4651 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4652
4653 hdev->stat.sco_rx++;
4654
4655 hci_dev_lock(hdev);
4656 conn = hci_conn_hash_lookup_handle(hdev, handle);
4657 hci_dev_unlock(hdev);
4658
4659 if (conn) {
4660 /* Send to upper protocol */
4661 sco_recv_scodata(conn, skb);
4662 return;
4663 } else {
4664 BT_ERR("%s SCO packet for unknown connection handle %d",
4665 hdev->name, handle);
4666 }
4667
4668 kfree_skb(skb);
4669 }
4670
4671 static bool hci_req_is_complete(struct hci_dev *hdev)
4672 {
4673 struct sk_buff *skb;
4674
4675 skb = skb_peek(&hdev->cmd_q);
4676 if (!skb)
4677 return true;
4678
4679 return bt_cb(skb)->req.start;
4680 }
4681
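/* Re-queue a clone of the last sent command (unless it was a reset).
 * Used when a controller emits a spontaneous reset-complete event
 * during init and the pending command would otherwise never complete.
 */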
4682 static void hci_resend_last(struct hci_dev *hdev)
4683 {
4684 struct hci_command_hdr *sent;
4685 struct sk_buff *skb;
4686 u16 opcode;
4687
4688 if (!hdev->sent_cmd)
4689 return;
4690
4691 sent = (void *) hdev->sent_cmd->data;
4692 opcode = __le16_to_cpu(sent->opcode);
4693 if (opcode == HCI_OP_RESET)
4694 return;
4695
4696 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4697 if (!skb)
4698 return;
4699
4700 skb_queue_head(&hdev->cmd_q, skb);
4701 queue_work(hdev->workqueue, &hdev->cmd_work);
4702 }
4703
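/* Called when a command completes.  Nothing happens unless the request
 * the command belongs to is finished or the command failed; in that
 * case any queued commands still belonging to the request are dropped
 * and its complete callback, if set, is invoked with the status.
 */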
4704 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4705 {
4706 hci_req_complete_t req_complete = NULL;
4707 struct sk_buff *skb;
4708 unsigned long flags;
4709
4710 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4711
4712 /* If the completed command doesn't match the last one that was
4713 * sent, we need to handle it specially.
4714 */
4715 if (!hci_sent_cmd_data(hdev, opcode)) {
4716 /* Some CSR based controllers generate a spontaneous
4717 * reset complete event during init and any pending
4718 * command will never be completed. In such a case we
4719 * need to resend whatever was the last sent
4720 * command.
4721 */
4722 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4723 hci_resend_last(hdev);
4724
4725 return;
4726 }
4727
4728 /* If the command succeeded and there are still more commands in
4729 * this request, the request is not yet complete.
4730 */
4731 if (!status && !hci_req_is_complete(hdev))
4732 return;
4733
4734 /* If this was the last command in a request, the complete
4735 * callback will be found in hdev->sent_cmd instead of the
4736 * command queue (hdev->cmd_q).
4737 */
4738 if (hdev->sent_cmd) {
4739 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4740
4741 if (req_complete) {
4742 /* We must set the complete callback to NULL to
4743 * avoid calling the callback more than once if
4744 * this function gets called again.
4745 */
4746 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4747
4748 goto call_complete;
4749 }
4750 }
4751
4752 /* Remove all pending commands belonging to this request */
4753 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4754 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4755 if (bt_cb(skb)->req.start) {
4756 __skb_queue_head(&hdev->cmd_q, skb);
4757 break;
4758 }
4759
4760 req_complete = bt_cb(skb)->req.complete;
4761 kfree_skb(skb);
4762 }
4763 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4764
4765 call_complete:
4766 if (req_complete)
4767 req_complete(hdev, status);
4768 }
4769
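/* RX work: drains hdev->rx_q.  Every frame is copied to the monitor
 * (and to promiscuous sockets), then either dropped (raw or user
 * channel mode, or data packets during init) or dispatched to the
 * event, ACL or SCO handler based on its packet type.
 */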
4770 static void hci_rx_work(struct work_struct *work)
4771 {
4772 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4773 struct sk_buff *skb;
4774
4775 BT_DBG("%s", hdev->name);
4776
4777 while ((skb = skb_dequeue(&hdev->rx_q))) {
4778 /* Send copy to monitor */
4779 hci_send_to_monitor(hdev, skb);
4780
4781 if (atomic_read(&hdev->promisc)) {
4782 /* Send copy to the sockets */
4783 hci_send_to_sock(hdev, skb);
4784 }
4785
4786 if (test_bit(HCI_RAW, &hdev->flags) ||
4787 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4788 kfree_skb(skb);
4789 continue;
4790 }
4791
4792 if (test_bit(HCI_INIT, &hdev->flags)) {
4793 /* Don't process data packets in this state. */
4794 switch (bt_cb(skb)->pkt_type) {
4795 case HCI_ACLDATA_PKT:
4796 case HCI_SCODATA_PKT:
4797 kfree_skb(skb);
4798 continue;
4799 }
4800 }
4801
4802 /* Process frame */
4803 switch (bt_cb(skb)->pkt_type) {
4804 case HCI_EVENT_PKT:
4805 BT_DBG("%s Event packet", hdev->name);
4806 hci_event_packet(hdev, skb);
4807 break;
4808
4809 case HCI_ACLDATA_PKT:
4810 BT_DBG("%s ACL data packet", hdev->name);
4811 hci_acldata_packet(hdev, skb);
4812 break;
4813
4814 case HCI_SCODATA_PKT:
4815 BT_DBG("%s SCO data packet", hdev->name);
4816 hci_scodata_packet(hdev, skb);
4817 break;
4818
4819 default:
4820 kfree_skb(skb);
4821 break;
4822 }
4823 }
4824 }
4825
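/* Command work: when the controller can accept a command (cmd_cnt > 0),
 * dequeue the next one, keep a clone in hdev->sent_cmd for completion
 * matching, send it and (re)arm the command timer unless a reset is in
 * progress.
 */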
4826 static void hci_cmd_work(struct work_struct *work)
4827 {
4828 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4829 struct sk_buff *skb;
4830
4831 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4832 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4833
4834 /* Send queued commands */
4835 if (atomic_read(&hdev->cmd_cnt)) {
4836 skb = skb_dequeue(&hdev->cmd_q);
4837 if (!skb)
4838 return;
4839
4840 kfree_skb(hdev->sent_cmd);
4841
4842 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4843 if (hdev->sent_cmd) {
4844 atomic_dec(&hdev->cmd_cnt);
4845 hci_send_frame(hdev, skb);
4846 if (test_bit(HCI_RESET, &hdev->flags))
4847 del_timer(&hdev->cmd_timer);
4848 else
4849 mod_timer(&hdev->cmd_timer,
4850 jiffies + HCI_CMD_TIMEOUT);
4851 } else {
4852 skb_queue_head(&hdev->cmd_q, skb);
4853 queue_work(hdev->workqueue, &hdev->cmd_work);
4854 }
4855 }
4856 }