/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
40 static void hci_rx_work(struct work_struct
*work
);
41 static void hci_cmd_work(struct work_struct
*work
);
42 static void hci_tx_work(struct work_struct
*work
);
45 LIST_HEAD(hci_dev_list
);
46 DEFINE_RWLOCK(hci_dev_list_lock
);
48 /* HCI callback list */
49 LIST_HEAD(hci_cb_list
);
50 DEFINE_RWLOCK(hci_cb_list_lock
);
52 /* HCI ID Numbering */
53 static DEFINE_IDA(hci_index_ida
);
55 /* ---- HCI notifications ---- */
/* Forward a device event (register, unregister, up, down) to the
 * HCI socket layer so that monitoring sockets see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62 /* ---- HCI debugfs entries ---- */
64 static ssize_t
dut_mode_read(struct file
*file
, char __user
*user_buf
,
65 size_t count
, loff_t
*ppos
)
67 struct hci_dev
*hdev
= file
->private_data
;
70 buf
[0] = test_bit(HCI_DUT_MODE
, &hdev
->dev_flags
) ? 'Y': 'N';
73 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
76 static ssize_t
dut_mode_write(struct file
*file
, const char __user
*user_buf
,
77 size_t count
, loff_t
*ppos
)
79 struct hci_dev
*hdev
= file
->private_data
;
82 size_t buf_size
= min(count
, (sizeof(buf
)-1));
86 if (!test_bit(HCI_UP
, &hdev
->flags
))
89 if (copy_from_user(buf
, user_buf
, buf_size
))
93 if (strtobool(buf
, &enable
))
96 if (enable
== test_bit(HCI_DUT_MODE
, &hdev
->dev_flags
))
101 skb
= __hci_cmd_sync(hdev
, HCI_OP_ENABLE_DUT_MODE
, 0, NULL
,
104 skb
= __hci_cmd_sync(hdev
, HCI_OP_RESET
, 0, NULL
,
106 hci_req_unlock(hdev
);
111 err
= -bt_to_errno(skb
->data
[0]);
117 change_bit(HCI_DUT_MODE
, &hdev
->dev_flags
);
122 static const struct file_operations dut_mode_fops
= {
124 .read
= dut_mode_read
,
125 .write
= dut_mode_write
,
126 .llseek
= default_llseek
,
129 static int features_show(struct seq_file
*f
, void *ptr
)
131 struct hci_dev
*hdev
= f
->private;
135 for (p
= 0; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
136 seq_printf(f
, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p
,
138 hdev
->features
[p
][0], hdev
->features
[p
][1],
139 hdev
->features
[p
][2], hdev
->features
[p
][3],
140 hdev
->features
[p
][4], hdev
->features
[p
][5],
141 hdev
->features
[p
][6], hdev
->features
[p
][7]);
143 if (lmp_le_capable(hdev
))
144 seq_printf(f
, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev
->le_features
[0], hdev
->le_features
[1],
147 hdev
->le_features
[2], hdev
->le_features
[3],
148 hdev
->le_features
[4], hdev
->le_features
[5],
149 hdev
->le_features
[6], hdev
->le_features
[7]);
150 hci_dev_unlock(hdev
);
155 static int features_open(struct inode
*inode
, struct file
*file
)
157 return single_open(file
, features_show
, inode
->i_private
);
160 static const struct file_operations features_fops
= {
161 .open
= features_open
,
164 .release
= single_release
,
167 static int blacklist_show(struct seq_file
*f
, void *p
)
169 struct hci_dev
*hdev
= f
->private;
170 struct bdaddr_list
*b
;
173 list_for_each_entry(b
, &hdev
->blacklist
, list
)
174 seq_printf(f
, "%pMR (type %u)\n", &b
->bdaddr
, b
->bdaddr_type
);
175 hci_dev_unlock(hdev
);
180 static int blacklist_open(struct inode
*inode
, struct file
*file
)
182 return single_open(file
, blacklist_show
, inode
->i_private
);
185 static const struct file_operations blacklist_fops
= {
186 .open
= blacklist_open
,
189 .release
= single_release
,
192 static int uuids_show(struct seq_file
*f
, void *p
)
194 struct hci_dev
*hdev
= f
->private;
195 struct bt_uuid
*uuid
;
198 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
205 for (i
= 0; i
< 16; i
++)
206 val
[i
] = uuid
->uuid
[15 - i
];
208 seq_printf(f
, "%pUb\n", val
);
210 hci_dev_unlock(hdev
);
215 static int uuids_open(struct inode
*inode
, struct file
*file
)
217 return single_open(file
, uuids_show
, inode
->i_private
);
220 static const struct file_operations uuids_fops
= {
224 .release
= single_release
,
227 static int inquiry_cache_show(struct seq_file
*f
, void *p
)
229 struct hci_dev
*hdev
= f
->private;
230 struct discovery_state
*cache
= &hdev
->discovery
;
231 struct inquiry_entry
*e
;
235 list_for_each_entry(e
, &cache
->all
, all
) {
236 struct inquiry_data
*data
= &e
->data
;
237 seq_printf(f
, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 data
->pscan_rep_mode
, data
->pscan_period_mode
,
240 data
->pscan_mode
, data
->dev_class
[2],
241 data
->dev_class
[1], data
->dev_class
[0],
242 __le16_to_cpu(data
->clock_offset
),
243 data
->rssi
, data
->ssp_mode
, e
->timestamp
);
246 hci_dev_unlock(hdev
);
251 static int inquiry_cache_open(struct inode
*inode
, struct file
*file
)
253 return single_open(file
, inquiry_cache_show
, inode
->i_private
);
256 static const struct file_operations inquiry_cache_fops
= {
257 .open
= inquiry_cache_open
,
260 .release
= single_release
,
263 static int link_keys_show(struct seq_file
*f
, void *ptr
)
265 struct hci_dev
*hdev
= f
->private;
266 struct list_head
*p
, *n
;
269 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
270 struct link_key
*key
= list_entry(p
, struct link_key
, list
);
271 seq_printf(f
, "%pMR %u %*phN %u\n", &key
->bdaddr
, key
->type
,
272 HCI_LINK_KEY_SIZE
, key
->val
, key
->pin_len
);
274 hci_dev_unlock(hdev
);
279 static int link_keys_open(struct inode
*inode
, struct file
*file
)
281 return single_open(file
, link_keys_show
, inode
->i_private
);
284 static const struct file_operations link_keys_fops
= {
285 .open
= link_keys_open
,
288 .release
= single_release
,
291 static int dev_class_show(struct seq_file
*f
, void *ptr
)
293 struct hci_dev
*hdev
= f
->private;
296 seq_printf(f
, "0x%.2x%.2x%.2x\n", hdev
->dev_class
[2],
297 hdev
->dev_class
[1], hdev
->dev_class
[0]);
298 hci_dev_unlock(hdev
);
303 static int dev_class_open(struct inode
*inode
, struct file
*file
)
305 return single_open(file
, dev_class_show
, inode
->i_private
);
308 static const struct file_operations dev_class_fops
= {
309 .open
= dev_class_open
,
312 .release
= single_release
,
315 static int voice_setting_get(void *data
, u64
*val
)
317 struct hci_dev
*hdev
= data
;
320 *val
= hdev
->voice_setting
;
321 hci_dev_unlock(hdev
);
326 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops
, voice_setting_get
,
327 NULL
, "0x%4.4llx\n");
329 static int auto_accept_delay_set(void *data
, u64 val
)
331 struct hci_dev
*hdev
= data
;
334 hdev
->auto_accept_delay
= val
;
335 hci_dev_unlock(hdev
);
340 static int auto_accept_delay_get(void *data
, u64
*val
)
342 struct hci_dev
*hdev
= data
;
345 *val
= hdev
->auto_accept_delay
;
346 hci_dev_unlock(hdev
);
351 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops
, auto_accept_delay_get
,
352 auto_accept_delay_set
, "%llu\n");
354 static int ssp_debug_mode_set(void *data
, u64 val
)
356 struct hci_dev
*hdev
= data
;
361 if (val
!= 0 && val
!= 1)
364 if (!test_bit(HCI_UP
, &hdev
->flags
))
369 skb
= __hci_cmd_sync(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
, sizeof(mode
),
370 &mode
, HCI_CMD_TIMEOUT
);
371 hci_req_unlock(hdev
);
376 err
= -bt_to_errno(skb
->data
[0]);
383 hdev
->ssp_debug_mode
= val
;
384 hci_dev_unlock(hdev
);
389 static int ssp_debug_mode_get(void *data
, u64
*val
)
391 struct hci_dev
*hdev
= data
;
394 *val
= hdev
->ssp_debug_mode
;
395 hci_dev_unlock(hdev
);
400 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops
, ssp_debug_mode_get
,
401 ssp_debug_mode_set
, "%llu\n");
403 static ssize_t
force_sc_support_read(struct file
*file
, char __user
*user_buf
,
404 size_t count
, loff_t
*ppos
)
406 struct hci_dev
*hdev
= file
->private_data
;
409 buf
[0] = test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
) ? 'Y': 'N';
412 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
415 static ssize_t
force_sc_support_write(struct file
*file
,
416 const char __user
*user_buf
,
417 size_t count
, loff_t
*ppos
)
419 struct hci_dev
*hdev
= file
->private_data
;
421 size_t buf_size
= min(count
, (sizeof(buf
)-1));
424 if (test_bit(HCI_UP
, &hdev
->flags
))
427 if (copy_from_user(buf
, user_buf
, buf_size
))
430 buf
[buf_size
] = '\0';
431 if (strtobool(buf
, &enable
))
434 if (enable
== test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
))
437 change_bit(HCI_FORCE_SC
, &hdev
->dev_flags
);
442 static const struct file_operations force_sc_support_fops
= {
444 .read
= force_sc_support_read
,
445 .write
= force_sc_support_write
,
446 .llseek
= default_llseek
,
449 static ssize_t
sc_only_mode_read(struct file
*file
, char __user
*user_buf
,
450 size_t count
, loff_t
*ppos
)
452 struct hci_dev
*hdev
= file
->private_data
;
455 buf
[0] = test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
) ? 'Y': 'N';
458 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
461 static const struct file_operations sc_only_mode_fops
= {
463 .read
= sc_only_mode_read
,
464 .llseek
= default_llseek
,
467 static int idle_timeout_set(void *data
, u64 val
)
469 struct hci_dev
*hdev
= data
;
471 if (val
!= 0 && (val
< 500 || val
> 3600000))
475 hdev
->idle_timeout
= val
;
476 hci_dev_unlock(hdev
);
481 static int idle_timeout_get(void *data
, u64
*val
)
483 struct hci_dev
*hdev
= data
;
486 *val
= hdev
->idle_timeout
;
487 hci_dev_unlock(hdev
);
492 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops
, idle_timeout_get
,
493 idle_timeout_set
, "%llu\n");
495 static int rpa_timeout_set(void *data
, u64 val
)
497 struct hci_dev
*hdev
= data
;
499 /* Require the RPA timeout to be at least 30 seconds and at most
502 if (val
< 30 || val
> (60 * 60 * 24))
506 hdev
->rpa_timeout
= val
;
507 hci_dev_unlock(hdev
);
512 static int rpa_timeout_get(void *data
, u64
*val
)
514 struct hci_dev
*hdev
= data
;
517 *val
= hdev
->rpa_timeout
;
518 hci_dev_unlock(hdev
);
523 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops
, rpa_timeout_get
,
524 rpa_timeout_set
, "%llu\n");
526 static int sniff_min_interval_set(void *data
, u64 val
)
528 struct hci_dev
*hdev
= data
;
530 if (val
== 0 || val
% 2 || val
> hdev
->sniff_max_interval
)
534 hdev
->sniff_min_interval
= val
;
535 hci_dev_unlock(hdev
);
540 static int sniff_min_interval_get(void *data
, u64
*val
)
542 struct hci_dev
*hdev
= data
;
545 *val
= hdev
->sniff_min_interval
;
546 hci_dev_unlock(hdev
);
551 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops
, sniff_min_interval_get
,
552 sniff_min_interval_set
, "%llu\n");
554 static int sniff_max_interval_set(void *data
, u64 val
)
556 struct hci_dev
*hdev
= data
;
558 if (val
== 0 || val
% 2 || val
< hdev
->sniff_min_interval
)
562 hdev
->sniff_max_interval
= val
;
563 hci_dev_unlock(hdev
);
568 static int sniff_max_interval_get(void *data
, u64
*val
)
570 struct hci_dev
*hdev
= data
;
573 *val
= hdev
->sniff_max_interval
;
574 hci_dev_unlock(hdev
);
579 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops
, sniff_max_interval_get
,
580 sniff_max_interval_set
, "%llu\n");
582 static int identity_show(struct seq_file
*f
, void *p
)
584 struct hci_dev
*hdev
= f
->private;
590 if (test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dev_flags
) ||
591 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
592 addr
= &hdev
->static_addr
;
593 addr_type
= ADDR_LE_DEV_RANDOM
;
595 addr
= &hdev
->bdaddr
;
596 addr_type
= ADDR_LE_DEV_PUBLIC
;
599 seq_printf(f
, "%pMR (type %u) %*phN\n", addr
, addr_type
, 16, hdev
->irk
);
601 hci_dev_unlock(hdev
);
606 static int identity_open(struct inode
*inode
, struct file
*file
)
608 return single_open(file
, identity_show
, inode
->i_private
);
611 static const struct file_operations identity_fops
= {
612 .open
= identity_open
,
615 .release
= single_release
,
618 static int random_address_show(struct seq_file
*f
, void *p
)
620 struct hci_dev
*hdev
= f
->private;
623 seq_printf(f
, "%pMR\n", &hdev
->random_addr
);
624 hci_dev_unlock(hdev
);
629 static int random_address_open(struct inode
*inode
, struct file
*file
)
631 return single_open(file
, random_address_show
, inode
->i_private
);
634 static const struct file_operations random_address_fops
= {
635 .open
= random_address_open
,
638 .release
= single_release
,
641 static int static_address_show(struct seq_file
*f
, void *p
)
643 struct hci_dev
*hdev
= f
->private;
646 seq_printf(f
, "%pMR\n", &hdev
->static_addr
);
647 hci_dev_unlock(hdev
);
652 static int static_address_open(struct inode
*inode
, struct file
*file
)
654 return single_open(file
, static_address_show
, inode
->i_private
);
657 static const struct file_operations static_address_fops
= {
658 .open
= static_address_open
,
661 .release
= single_release
,
664 static ssize_t
force_static_address_read(struct file
*file
,
665 char __user
*user_buf
,
666 size_t count
, loff_t
*ppos
)
668 struct hci_dev
*hdev
= file
->private_data
;
671 buf
[0] = test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dev_flags
) ? 'Y': 'N';
674 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
677 static ssize_t
force_static_address_write(struct file
*file
,
678 const char __user
*user_buf
,
679 size_t count
, loff_t
*ppos
)
681 struct hci_dev
*hdev
= file
->private_data
;
683 size_t buf_size
= min(count
, (sizeof(buf
)-1));
686 if (test_bit(HCI_UP
, &hdev
->flags
))
689 if (copy_from_user(buf
, user_buf
, buf_size
))
692 buf
[buf_size
] = '\0';
693 if (strtobool(buf
, &enable
))
696 if (enable
== test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dev_flags
))
699 change_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dev_flags
);
704 static const struct file_operations force_static_address_fops
= {
706 .read
= force_static_address_read
,
707 .write
= force_static_address_write
,
708 .llseek
= default_llseek
,
711 static int identity_resolving_keys_show(struct seq_file
*f
, void *ptr
)
713 struct hci_dev
*hdev
= f
->private;
714 struct list_head
*p
, *n
;
717 list_for_each_safe(p
, n
, &hdev
->identity_resolving_keys
) {
718 struct smp_irk
*irk
= list_entry(p
, struct smp_irk
, list
);
719 seq_printf(f
, "%pMR (type %u) %*phN %pMR\n",
720 &irk
->bdaddr
, irk
->addr_type
,
721 16, irk
->val
, &irk
->rpa
);
723 hci_dev_unlock(hdev
);
728 static int identity_resolving_keys_open(struct inode
*inode
, struct file
*file
)
730 return single_open(file
, identity_resolving_keys_show
,
734 static const struct file_operations identity_resolving_keys_fops
= {
735 .open
= identity_resolving_keys_open
,
738 .release
= single_release
,
741 static int long_term_keys_show(struct seq_file
*f
, void *ptr
)
743 struct hci_dev
*hdev
= f
->private;
744 struct list_head
*p
, *n
;
747 list_for_each_safe(p
, n
, &hdev
->long_term_keys
) {
748 struct smp_ltk
*ltk
= list_entry(p
, struct smp_ltk
, list
);
749 seq_printf(f
, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
750 <k
->bdaddr
, ltk
->bdaddr_type
, ltk
->authenticated
,
751 ltk
->type
, ltk
->enc_size
, __le16_to_cpu(ltk
->ediv
),
752 8, ltk
->rand
, 16, ltk
->val
);
754 hci_dev_unlock(hdev
);
759 static int long_term_keys_open(struct inode
*inode
, struct file
*file
)
761 return single_open(file
, long_term_keys_show
, inode
->i_private
);
764 static const struct file_operations long_term_keys_fops
= {
765 .open
= long_term_keys_open
,
768 .release
= single_release
,
771 static int conn_min_interval_set(void *data
, u64 val
)
773 struct hci_dev
*hdev
= data
;
775 if (val
< 0x0006 || val
> 0x0c80 || val
> hdev
->le_conn_max_interval
)
779 hdev
->le_conn_min_interval
= val
;
780 hci_dev_unlock(hdev
);
785 static int conn_min_interval_get(void *data
, u64
*val
)
787 struct hci_dev
*hdev
= data
;
790 *val
= hdev
->le_conn_min_interval
;
791 hci_dev_unlock(hdev
);
796 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops
, conn_min_interval_get
,
797 conn_min_interval_set
, "%llu\n");
799 static int conn_max_interval_set(void *data
, u64 val
)
801 struct hci_dev
*hdev
= data
;
803 if (val
< 0x0006 || val
> 0x0c80 || val
< hdev
->le_conn_min_interval
)
807 hdev
->le_conn_max_interval
= val
;
808 hci_dev_unlock(hdev
);
813 static int conn_max_interval_get(void *data
, u64
*val
)
815 struct hci_dev
*hdev
= data
;
818 *val
= hdev
->le_conn_max_interval
;
819 hci_dev_unlock(hdev
);
824 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops
, conn_max_interval_get
,
825 conn_max_interval_set
, "%llu\n");
827 static int adv_channel_map_set(void *data
, u64 val
)
829 struct hci_dev
*hdev
= data
;
831 if (val
< 0x01 || val
> 0x07)
835 hdev
->le_adv_channel_map
= val
;
836 hci_dev_unlock(hdev
);
841 static int adv_channel_map_get(void *data
, u64
*val
)
843 struct hci_dev
*hdev
= data
;
846 *val
= hdev
->le_adv_channel_map
;
847 hci_dev_unlock(hdev
);
852 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops
, adv_channel_map_get
,
853 adv_channel_map_set
, "%llu\n");
855 static ssize_t
lowpan_read(struct file
*file
, char __user
*user_buf
,
856 size_t count
, loff_t
*ppos
)
858 struct hci_dev
*hdev
= file
->private_data
;
861 buf
[0] = test_bit(HCI_6LOWPAN_ENABLED
, &hdev
->dev_flags
) ? 'Y' : 'N';
864 return simple_read_from_buffer(user_buf
, count
, ppos
, buf
, 2);
867 static ssize_t
lowpan_write(struct file
*fp
, const char __user
*user_buffer
,
868 size_t count
, loff_t
*position
)
870 struct hci_dev
*hdev
= fp
->private_data
;
873 size_t buf_size
= min(count
, (sizeof(buf
)-1));
875 if (copy_from_user(buf
, user_buffer
, buf_size
))
878 buf
[buf_size
] = '\0';
880 if (strtobool(buf
, &enable
) < 0)
883 if (enable
== test_bit(HCI_6LOWPAN_ENABLED
, &hdev
->dev_flags
))
886 change_bit(HCI_6LOWPAN_ENABLED
, &hdev
->dev_flags
);
891 static const struct file_operations lowpan_debugfs_fops
= {
894 .write
= lowpan_write
,
895 .llseek
= default_llseek
,
898 /* ---- HCI requests ---- */
900 static void hci_req_sync_complete(struct hci_dev
*hdev
, u8 result
)
902 BT_DBG("%s result 0x%2.2x", hdev
->name
, result
);
904 if (hdev
->req_status
== HCI_REQ_PEND
) {
905 hdev
->req_result
= result
;
906 hdev
->req_status
= HCI_REQ_DONE
;
907 wake_up_interruptible(&hdev
->req_wait_q
);
911 static void hci_req_cancel(struct hci_dev
*hdev
, int err
)
913 BT_DBG("%s err 0x%2.2x", hdev
->name
, err
);
915 if (hdev
->req_status
== HCI_REQ_PEND
) {
916 hdev
->req_result
= err
;
917 hdev
->req_status
= HCI_REQ_CANCELED
;
918 wake_up_interruptible(&hdev
->req_wait_q
);
922 static struct sk_buff
*hci_get_cmd_complete(struct hci_dev
*hdev
, u16 opcode
,
925 struct hci_ev_cmd_complete
*ev
;
926 struct hci_event_hdr
*hdr
;
931 skb
= hdev
->recv_evt
;
932 hdev
->recv_evt
= NULL
;
934 hci_dev_unlock(hdev
);
937 return ERR_PTR(-ENODATA
);
939 if (skb
->len
< sizeof(*hdr
)) {
940 BT_ERR("Too short HCI event");
944 hdr
= (void *) skb
->data
;
945 skb_pull(skb
, HCI_EVENT_HDR_SIZE
);
948 if (hdr
->evt
!= event
)
953 if (hdr
->evt
!= HCI_EV_CMD_COMPLETE
) {
954 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr
->evt
);
958 if (skb
->len
< sizeof(*ev
)) {
959 BT_ERR("Too short cmd_complete event");
963 ev
= (void *) skb
->data
;
964 skb_pull(skb
, sizeof(*ev
));
966 if (opcode
== __le16_to_cpu(ev
->opcode
))
969 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode
,
970 __le16_to_cpu(ev
->opcode
));
974 return ERR_PTR(-ENODATA
);
977 struct sk_buff
*__hci_cmd_sync_ev(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
978 const void *param
, u8 event
, u32 timeout
)
980 DECLARE_WAITQUEUE(wait
, current
);
981 struct hci_request req
;
984 BT_DBG("%s", hdev
->name
);
986 hci_req_init(&req
, hdev
);
988 hci_req_add_ev(&req
, opcode
, plen
, param
, event
);
990 hdev
->req_status
= HCI_REQ_PEND
;
992 err
= hci_req_run(&req
, hci_req_sync_complete
);
996 add_wait_queue(&hdev
->req_wait_q
, &wait
);
997 set_current_state(TASK_INTERRUPTIBLE
);
999 schedule_timeout(timeout
);
1001 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1003 if (signal_pending(current
))
1004 return ERR_PTR(-EINTR
);
1006 switch (hdev
->req_status
) {
1008 err
= -bt_to_errno(hdev
->req_result
);
1011 case HCI_REQ_CANCELED
:
1012 err
= -hdev
->req_result
;
1020 hdev
->req_status
= hdev
->req_result
= 0;
1022 BT_DBG("%s end: err %d", hdev
->name
, err
);
1025 return ERR_PTR(err
);
1027 return hci_get_cmd_complete(hdev
, opcode
, event
);
1029 EXPORT_SYMBOL(__hci_cmd_sync_ev
);
1031 struct sk_buff
*__hci_cmd_sync(struct hci_dev
*hdev
, u16 opcode
, u32 plen
,
1032 const void *param
, u32 timeout
)
1034 return __hci_cmd_sync_ev(hdev
, opcode
, plen
, param
, 0, timeout
);
1036 EXPORT_SYMBOL(__hci_cmd_sync
);
1038 /* Execute request and wait for completion. */
1039 static int __hci_req_sync(struct hci_dev
*hdev
,
1040 void (*func
)(struct hci_request
*req
,
1042 unsigned long opt
, __u32 timeout
)
1044 struct hci_request req
;
1045 DECLARE_WAITQUEUE(wait
, current
);
1048 BT_DBG("%s start", hdev
->name
);
1050 hci_req_init(&req
, hdev
);
1052 hdev
->req_status
= HCI_REQ_PEND
;
1056 err
= hci_req_run(&req
, hci_req_sync_complete
);
1058 hdev
->req_status
= 0;
1060 /* ENODATA means the HCI request command queue is empty.
1061 * This can happen when a request with conditionals doesn't
1062 * trigger any commands to be sent. This is normal behavior
1063 * and should not trigger an error return.
1065 if (err
== -ENODATA
)
1071 add_wait_queue(&hdev
->req_wait_q
, &wait
);
1072 set_current_state(TASK_INTERRUPTIBLE
);
1074 schedule_timeout(timeout
);
1076 remove_wait_queue(&hdev
->req_wait_q
, &wait
);
1078 if (signal_pending(current
))
1081 switch (hdev
->req_status
) {
1083 err
= -bt_to_errno(hdev
->req_result
);
1086 case HCI_REQ_CANCELED
:
1087 err
= -hdev
->req_result
;
1095 hdev
->req_status
= hdev
->req_result
= 0;
1097 BT_DBG("%s end: err %d", hdev
->name
, err
);
1102 static int hci_req_sync(struct hci_dev
*hdev
,
1103 void (*req
)(struct hci_request
*req
,
1105 unsigned long opt
, __u32 timeout
)
1109 if (!test_bit(HCI_UP
, &hdev
->flags
))
1112 /* Serialize all requests */
1114 ret
= __hci_req_sync(hdev
, req
, opt
, timeout
);
1115 hci_req_unlock(hdev
);
1120 static void hci_reset_req(struct hci_request
*req
, unsigned long opt
)
1122 BT_DBG("%s %ld", req
->hdev
->name
, opt
);
1125 set_bit(HCI_RESET
, &req
->hdev
->flags
);
1126 hci_req_add(req
, HCI_OP_RESET
, 0, NULL
);
1129 static void bredr_init(struct hci_request
*req
)
1131 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_PACKET_BASED
;
1133 /* Read Local Supported Features */
1134 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
1136 /* Read Local Version */
1137 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
1139 /* Read BD Address */
1140 hci_req_add(req
, HCI_OP_READ_BD_ADDR
, 0, NULL
);
1143 static void amp_init(struct hci_request
*req
)
1145 req
->hdev
->flow_ctl_mode
= HCI_FLOW_CTL_MODE_BLOCK_BASED
;
1147 /* Read Local Version */
1148 hci_req_add(req
, HCI_OP_READ_LOCAL_VERSION
, 0, NULL
);
1150 /* Read Local Supported Commands */
1151 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
1153 /* Read Local Supported Features */
1154 hci_req_add(req
, HCI_OP_READ_LOCAL_FEATURES
, 0, NULL
);
1156 /* Read Local AMP Info */
1157 hci_req_add(req
, HCI_OP_READ_LOCAL_AMP_INFO
, 0, NULL
);
1159 /* Read Data Blk size */
1160 hci_req_add(req
, HCI_OP_READ_DATA_BLOCK_SIZE
, 0, NULL
);
1162 /* Read Flow Control Mode */
1163 hci_req_add(req
, HCI_OP_READ_FLOW_CONTROL_MODE
, 0, NULL
);
1165 /* Read Location Data */
1166 hci_req_add(req
, HCI_OP_READ_LOCATION_DATA
, 0, NULL
);
1169 static void hci_init1_req(struct hci_request
*req
, unsigned long opt
)
1171 struct hci_dev
*hdev
= req
->hdev
;
1173 BT_DBG("%s %ld", hdev
->name
, opt
);
1176 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
))
1177 hci_reset_req(req
, 0);
1179 switch (hdev
->dev_type
) {
1189 BT_ERR("Unknown device type %d", hdev
->dev_type
);
1194 static void bredr_setup(struct hci_request
*req
)
1196 struct hci_dev
*hdev
= req
->hdev
;
1201 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1202 hci_req_add(req
, HCI_OP_READ_BUFFER_SIZE
, 0, NULL
);
1204 /* Read Class of Device */
1205 hci_req_add(req
, HCI_OP_READ_CLASS_OF_DEV
, 0, NULL
);
1207 /* Read Local Name */
1208 hci_req_add(req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
1210 /* Read Voice Setting */
1211 hci_req_add(req
, HCI_OP_READ_VOICE_SETTING
, 0, NULL
);
1213 /* Read Number of Supported IAC */
1214 hci_req_add(req
, HCI_OP_READ_NUM_SUPPORTED_IAC
, 0, NULL
);
1216 /* Read Current IAC LAP */
1217 hci_req_add(req
, HCI_OP_READ_CURRENT_IAC_LAP
, 0, NULL
);
1219 /* Clear Event Filters */
1220 flt_type
= HCI_FLT_CLEAR_ALL
;
1221 hci_req_add(req
, HCI_OP_SET_EVENT_FLT
, 1, &flt_type
);
1223 /* Connection accept timeout ~20 secs */
1224 param
= __constant_cpu_to_le16(0x7d00);
1225 hci_req_add(req
, HCI_OP_WRITE_CA_TIMEOUT
, 2, ¶m
);
1227 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1228 * but it does not support page scan related HCI commands.
1230 if (hdev
->manufacturer
!= 31 && hdev
->hci_ver
> BLUETOOTH_VER_1_1
) {
1231 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_ACTIVITY
, 0, NULL
);
1232 hci_req_add(req
, HCI_OP_READ_PAGE_SCAN_TYPE
, 0, NULL
);
1236 static void le_setup(struct hci_request
*req
)
1238 struct hci_dev
*hdev
= req
->hdev
;
1240 /* Read LE Buffer Size */
1241 hci_req_add(req
, HCI_OP_LE_READ_BUFFER_SIZE
, 0, NULL
);
1243 /* Read LE Local Supported Features */
1244 hci_req_add(req
, HCI_OP_LE_READ_LOCAL_FEATURES
, 0, NULL
);
1246 /* Read LE Advertising Channel TX Power */
1247 hci_req_add(req
, HCI_OP_LE_READ_ADV_TX_POWER
, 0, NULL
);
1249 /* Read LE White List Size */
1250 hci_req_add(req
, HCI_OP_LE_READ_WHITE_LIST_SIZE
, 0, NULL
);
1252 /* Read LE Supported States */
1253 hci_req_add(req
, HCI_OP_LE_READ_SUPPORTED_STATES
, 0, NULL
);
1255 /* LE-only controllers have LE implicitly enabled */
1256 if (!lmp_bredr_capable(hdev
))
1257 set_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
1260 static u8
hci_get_inquiry_mode(struct hci_dev
*hdev
)
1262 if (lmp_ext_inq_capable(hdev
))
1265 if (lmp_inq_rssi_capable(hdev
))
1268 if (hdev
->manufacturer
== 11 && hdev
->hci_rev
== 0x00 &&
1269 hdev
->lmp_subver
== 0x0757)
1272 if (hdev
->manufacturer
== 15) {
1273 if (hdev
->hci_rev
== 0x03 && hdev
->lmp_subver
== 0x6963)
1275 if (hdev
->hci_rev
== 0x09 && hdev
->lmp_subver
== 0x6963)
1277 if (hdev
->hci_rev
== 0x00 && hdev
->lmp_subver
== 0x6965)
1281 if (hdev
->manufacturer
== 31 && hdev
->hci_rev
== 0x2005 &&
1282 hdev
->lmp_subver
== 0x1805)
1288 static void hci_setup_inquiry_mode(struct hci_request
*req
)
1292 mode
= hci_get_inquiry_mode(req
->hdev
);
1294 hci_req_add(req
, HCI_OP_WRITE_INQUIRY_MODE
, 1, &mode
);
1297 static void hci_setup_event_mask(struct hci_request
*req
)
1299 struct hci_dev
*hdev
= req
->hdev
;
1301 /* The second byte is 0xff instead of 0x9f (two reserved bits
1302 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1303 * command otherwise.
1305 u8 events
[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1307 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1308 * any event mask for pre 1.2 devices.
1310 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1313 if (lmp_bredr_capable(hdev
)) {
1314 events
[4] |= 0x01; /* Flow Specification Complete */
1315 events
[4] |= 0x02; /* Inquiry Result with RSSI */
1316 events
[4] |= 0x04; /* Read Remote Extended Features Complete */
1317 events
[5] |= 0x08; /* Synchronous Connection Complete */
1318 events
[5] |= 0x10; /* Synchronous Connection Changed */
1320 /* Use a different default for LE-only devices */
1321 memset(events
, 0, sizeof(events
));
1322 events
[0] |= 0x10; /* Disconnection Complete */
1323 events
[0] |= 0x80; /* Encryption Change */
1324 events
[1] |= 0x08; /* Read Remote Version Information Complete */
1325 events
[1] |= 0x20; /* Command Complete */
1326 events
[1] |= 0x40; /* Command Status */
1327 events
[1] |= 0x80; /* Hardware Error */
1328 events
[2] |= 0x04; /* Number of Completed Packets */
1329 events
[3] |= 0x02; /* Data Buffer Overflow */
1330 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
1333 if (lmp_inq_rssi_capable(hdev
))
1334 events
[4] |= 0x02; /* Inquiry Result with RSSI */
1336 if (lmp_sniffsubr_capable(hdev
))
1337 events
[5] |= 0x20; /* Sniff Subrating */
1339 if (lmp_pause_enc_capable(hdev
))
1340 events
[5] |= 0x80; /* Encryption Key Refresh Complete */
1342 if (lmp_ext_inq_capable(hdev
))
1343 events
[5] |= 0x40; /* Extended Inquiry Result */
1345 if (lmp_no_flush_capable(hdev
))
1346 events
[7] |= 0x01; /* Enhanced Flush Complete */
1348 if (lmp_lsto_capable(hdev
))
1349 events
[6] |= 0x80; /* Link Supervision Timeout Changed */
1351 if (lmp_ssp_capable(hdev
)) {
1352 events
[6] |= 0x01; /* IO Capability Request */
1353 events
[6] |= 0x02; /* IO Capability Response */
1354 events
[6] |= 0x04; /* User Confirmation Request */
1355 events
[6] |= 0x08; /* User Passkey Request */
1356 events
[6] |= 0x10; /* Remote OOB Data Request */
1357 events
[6] |= 0x20; /* Simple Pairing Complete */
1358 events
[7] |= 0x04; /* User Passkey Notification */
1359 events
[7] |= 0x08; /* Keypress Notification */
1360 events
[7] |= 0x10; /* Remote Host Supported
1361 * Features Notification
1365 if (lmp_le_capable(hdev
))
1366 events
[7] |= 0x20; /* LE Meta-Event */
1368 hci_req_add(req
, HCI_OP_SET_EVENT_MASK
, sizeof(events
), events
);
1370 if (lmp_le_capable(hdev
)) {
1371 memset(events
, 0, sizeof(events
));
1373 hci_req_add(req
, HCI_OP_LE_SET_EVENT_MASK
,
1374 sizeof(events
), events
);
1378 static void hci_init2_req(struct hci_request
*req
, unsigned long opt
)
1380 struct hci_dev
*hdev
= req
->hdev
;
1382 if (lmp_bredr_capable(hdev
))
1385 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
1387 if (lmp_le_capable(hdev
))
1390 hci_setup_event_mask(req
);
1392 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1393 * local supported commands HCI command.
1395 if (hdev
->manufacturer
!= 31 && hdev
->hci_ver
> BLUETOOTH_VER_1_1
)
1396 hci_req_add(req
, HCI_OP_READ_LOCAL_COMMANDS
, 0, NULL
);
1398 if (lmp_ssp_capable(hdev
)) {
1399 /* When SSP is available, then the host features page
1400 * should also be available as well. However some
1401 * controllers list the max_page as 0 as long as SSP
1402 * has not been enabled. To achieve proper debugging
1403 * output, force the minimum max_page to 1 at least.
1405 hdev
->max_page
= 0x01;
1407 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
1409 hci_req_add(req
, HCI_OP_WRITE_SSP_MODE
,
1410 sizeof(mode
), &mode
);
1412 struct hci_cp_write_eir cp
;
1414 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
1415 memset(&cp
, 0, sizeof(cp
));
1417 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
1421 if (lmp_inq_rssi_capable(hdev
))
1422 hci_setup_inquiry_mode(req
);
1424 if (lmp_inq_tx_pwr_capable(hdev
))
1425 hci_req_add(req
, HCI_OP_READ_INQ_RSP_TX_POWER
, 0, NULL
);
1427 if (lmp_ext_feat_capable(hdev
)) {
1428 struct hci_cp_read_local_ext_features cp
;
1431 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
1435 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
)) {
1437 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(enable
),
1442 static void hci_setup_link_policy(struct hci_request
*req
)
1444 struct hci_dev
*hdev
= req
->hdev
;
1445 struct hci_cp_write_def_link_policy cp
;
1446 u16 link_policy
= 0;
1448 if (lmp_rswitch_capable(hdev
))
1449 link_policy
|= HCI_LP_RSWITCH
;
1450 if (lmp_hold_capable(hdev
))
1451 link_policy
|= HCI_LP_HOLD
;
1452 if (lmp_sniff_capable(hdev
))
1453 link_policy
|= HCI_LP_SNIFF
;
1454 if (lmp_park_capable(hdev
))
1455 link_policy
|= HCI_LP_PARK
;
1457 cp
.policy
= cpu_to_le16(link_policy
);
1458 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, sizeof(cp
), &cp
);
1461 static void hci_set_le_support(struct hci_request
*req
)
1463 struct hci_dev
*hdev
= req
->hdev
;
1464 struct hci_cp_write_le_host_supported cp
;
1466 /* LE-only devices do not support explicit enablement */
1467 if (!lmp_bredr_capable(hdev
))
1470 memset(&cp
, 0, sizeof(cp
));
1472 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
1474 cp
.simul
= lmp_le_br_capable(hdev
);
1477 if (cp
.le
!= lmp_host_le_capable(hdev
))
1478 hci_req_add(req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(cp
),
1482 static void hci_set_event_mask_page_2(struct hci_request
*req
)
1484 struct hci_dev
*hdev
= req
->hdev
;
1485 u8 events
[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1487 /* If Connectionless Slave Broadcast master role is supported
1488 * enable all necessary events for it.
1490 if (lmp_csb_master_capable(hdev
)) {
1491 events
[1] |= 0x40; /* Triggered Clock Capture */
1492 events
[1] |= 0x80; /* Synchronization Train Complete */
1493 events
[2] |= 0x10; /* Slave Page Response Timeout */
1494 events
[2] |= 0x20; /* CSB Channel Map Change */
1497 /* If Connectionless Slave Broadcast slave role is supported
1498 * enable all necessary events for it.
1500 if (lmp_csb_slave_capable(hdev
)) {
1501 events
[2] |= 0x01; /* Synchronization Train Received */
1502 events
[2] |= 0x02; /* CSB Receive */
1503 events
[2] |= 0x04; /* CSB Timeout */
1504 events
[2] |= 0x08; /* Truncated Page Complete */
1507 /* Enable Authenticated Payload Timeout Expired event if supported */
1508 if (lmp_ping_capable(hdev
))
1511 hci_req_add(req
, HCI_OP_SET_EVENT_MASK_PAGE_2
, sizeof(events
), events
);
1514 static void hci_init3_req(struct hci_request
*req
, unsigned long opt
)
1516 struct hci_dev
*hdev
= req
->hdev
;
1519 /* Some Broadcom based Bluetooth controllers do not support the
1520 * Delete Stored Link Key command. They are clearly indicating its
1521 * absence in the bit mask of supported commands.
1523 * Check the supported commands and only if the the command is marked
1524 * as supported send it. If not supported assume that the controller
1525 * does not have actual support for stored link keys which makes this
1526 * command redundant anyway.
1528 * Some controllers indicate that they support handling deleting
1529 * stored link keys, but they don't. The quirk lets a driver
1530 * just disable this command.
1532 if (hdev
->commands
[6] & 0x80 &&
1533 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY
, &hdev
->quirks
)) {
1534 struct hci_cp_delete_stored_link_key cp
;
1536 bacpy(&cp
.bdaddr
, BDADDR_ANY
);
1537 cp
.delete_all
= 0x01;
1538 hci_req_add(req
, HCI_OP_DELETE_STORED_LINK_KEY
,
1542 if (hdev
->commands
[5] & 0x10)
1543 hci_setup_link_policy(req
);
1545 if (lmp_le_capable(hdev
))
1546 hci_set_le_support(req
);
1548 /* Read features beyond page 1 if available */
1549 for (p
= 2; p
< HCI_MAX_PAGES
&& p
<= hdev
->max_page
; p
++) {
1550 struct hci_cp_read_local_ext_features cp
;
1553 hci_req_add(req
, HCI_OP_READ_LOCAL_EXT_FEATURES
,
1558 static void hci_init4_req(struct hci_request
*req
, unsigned long opt
)
1560 struct hci_dev
*hdev
= req
->hdev
;
1562 /* Set event mask page 2 if the HCI command for it is supported */
1563 if (hdev
->commands
[22] & 0x04)
1564 hci_set_event_mask_page_2(req
);
1566 /* Check for Synchronization Train support */
1567 if (lmp_sync_train_capable(hdev
))
1568 hci_req_add(req
, HCI_OP_READ_SYNC_TRAIN_PARAMS
, 0, NULL
);
1570 /* Enable Secure Connections if supported and configured */
1571 if ((lmp_sc_capable(hdev
) ||
1572 test_bit(HCI_FORCE_SC
, &hdev
->dev_flags
)) &&
1573 test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
)) {
1575 hci_req_add(req
, HCI_OP_WRITE_SC_SUPPORT
,
1576 sizeof(support
), &support
);
1580 static int __hci_init(struct hci_dev
*hdev
)
1584 err
= __hci_req_sync(hdev
, hci_init1_req
, 0, HCI_INIT_TIMEOUT
);
1588 /* The Device Under Test (DUT) mode is special and available for
1589 * all controller types. So just create it early on.
1591 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
1592 debugfs_create_file("dut_mode", 0644, hdev
->debugfs
, hdev
,
1596 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1597 * BR/EDR/LE type controllers. AMP controllers only need the
1600 if (hdev
->dev_type
!= HCI_BREDR
)
1603 err
= __hci_req_sync(hdev
, hci_init2_req
, 0, HCI_INIT_TIMEOUT
);
1607 err
= __hci_req_sync(hdev
, hci_init3_req
, 0, HCI_INIT_TIMEOUT
);
1611 err
= __hci_req_sync(hdev
, hci_init4_req
, 0, HCI_INIT_TIMEOUT
);
1615 /* Only create debugfs entries during the initial setup
1616 * phase and not every time the controller gets powered on.
1618 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
))
1621 debugfs_create_file("features", 0444, hdev
->debugfs
, hdev
,
1623 debugfs_create_u16("manufacturer", 0444, hdev
->debugfs
,
1624 &hdev
->manufacturer
);
1625 debugfs_create_u8("hci_version", 0444, hdev
->debugfs
, &hdev
->hci_ver
);
1626 debugfs_create_u16("hci_revision", 0444, hdev
->debugfs
, &hdev
->hci_rev
);
1627 debugfs_create_file("blacklist", 0444, hdev
->debugfs
, hdev
,
1629 debugfs_create_file("uuids", 0444, hdev
->debugfs
, hdev
, &uuids_fops
);
1631 if (lmp_bredr_capable(hdev
)) {
1632 debugfs_create_file("inquiry_cache", 0444, hdev
->debugfs
,
1633 hdev
, &inquiry_cache_fops
);
1634 debugfs_create_file("link_keys", 0400, hdev
->debugfs
,
1635 hdev
, &link_keys_fops
);
1636 debugfs_create_file("dev_class", 0444, hdev
->debugfs
,
1637 hdev
, &dev_class_fops
);
1638 debugfs_create_file("voice_setting", 0444, hdev
->debugfs
,
1639 hdev
, &voice_setting_fops
);
1642 if (lmp_ssp_capable(hdev
)) {
1643 debugfs_create_file("auto_accept_delay", 0644, hdev
->debugfs
,
1644 hdev
, &auto_accept_delay_fops
);
1645 debugfs_create_file("ssp_debug_mode", 0644, hdev
->debugfs
,
1646 hdev
, &ssp_debug_mode_fops
);
1647 debugfs_create_file("force_sc_support", 0644, hdev
->debugfs
,
1648 hdev
, &force_sc_support_fops
);
1649 debugfs_create_file("sc_only_mode", 0444, hdev
->debugfs
,
1650 hdev
, &sc_only_mode_fops
);
1653 if (lmp_sniff_capable(hdev
)) {
1654 debugfs_create_file("idle_timeout", 0644, hdev
->debugfs
,
1655 hdev
, &idle_timeout_fops
);
1656 debugfs_create_file("sniff_min_interval", 0644, hdev
->debugfs
,
1657 hdev
, &sniff_min_interval_fops
);
1658 debugfs_create_file("sniff_max_interval", 0644, hdev
->debugfs
,
1659 hdev
, &sniff_max_interval_fops
);
1662 if (lmp_le_capable(hdev
)) {
1663 debugfs_create_file("identity", 0400, hdev
->debugfs
,
1664 hdev
, &identity_fops
);
1665 debugfs_create_file("rpa_timeout", 0644, hdev
->debugfs
,
1666 hdev
, &rpa_timeout_fops
);
1667 debugfs_create_file("random_address", 0444, hdev
->debugfs
,
1668 hdev
, &random_address_fops
);
1669 debugfs_create_file("static_address", 0444, hdev
->debugfs
,
1670 hdev
, &static_address_fops
);
1672 /* For controllers with a public address, provide a debug
1673 * option to force the usage of the configured static
1674 * address. By default the public address is used.
1676 if (bacmp(&hdev
->bdaddr
, BDADDR_ANY
))
1677 debugfs_create_file("force_static_address", 0644,
1678 hdev
->debugfs
, hdev
,
1679 &force_static_address_fops
);
1681 debugfs_create_u8("white_list_size", 0444, hdev
->debugfs
,
1682 &hdev
->le_white_list_size
);
1683 debugfs_create_file("identity_resolving_keys", 0400,
1684 hdev
->debugfs
, hdev
,
1685 &identity_resolving_keys_fops
);
1686 debugfs_create_file("long_term_keys", 0400, hdev
->debugfs
,
1687 hdev
, &long_term_keys_fops
);
1688 debugfs_create_file("conn_min_interval", 0644, hdev
->debugfs
,
1689 hdev
, &conn_min_interval_fops
);
1690 debugfs_create_file("conn_max_interval", 0644, hdev
->debugfs
,
1691 hdev
, &conn_max_interval_fops
);
1692 debugfs_create_file("adv_channel_map", 0644, hdev
->debugfs
,
1693 hdev
, &adv_channel_map_fops
);
1694 debugfs_create_file("6lowpan", 0644, hdev
->debugfs
, hdev
,
1695 &lowpan_debugfs_fops
);
1701 static void hci_scan_req(struct hci_request
*req
, unsigned long opt
)
1705 BT_DBG("%s %x", req
->hdev
->name
, scan
);
1707 /* Inquiry and Page scans */
1708 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1711 static void hci_auth_req(struct hci_request
*req
, unsigned long opt
)
1715 BT_DBG("%s %x", req
->hdev
->name
, auth
);
1717 /* Authentication */
1718 hci_req_add(req
, HCI_OP_WRITE_AUTH_ENABLE
, 1, &auth
);
1721 static void hci_encrypt_req(struct hci_request
*req
, unsigned long opt
)
1725 BT_DBG("%s %x", req
->hdev
->name
, encrypt
);
1728 hci_req_add(req
, HCI_OP_WRITE_ENCRYPT_MODE
, 1, &encrypt
);
1731 static void hci_linkpol_req(struct hci_request
*req
, unsigned long opt
)
1733 __le16 policy
= cpu_to_le16(opt
);
1735 BT_DBG("%s %x", req
->hdev
->name
, policy
);
1737 /* Default link policy */
1738 hci_req_add(req
, HCI_OP_WRITE_DEF_LINK_POLICY
, 2, &policy
);
1741 /* Get HCI device by index.
1742 * Device is held on return. */
1743 struct hci_dev
*hci_dev_get(int index
)
1745 struct hci_dev
*hdev
= NULL
, *d
;
1747 BT_DBG("%d", index
);
1752 read_lock(&hci_dev_list_lock
);
1753 list_for_each_entry(d
, &hci_dev_list
, list
) {
1754 if (d
->id
== index
) {
1755 hdev
= hci_dev_hold(d
);
1759 read_unlock(&hci_dev_list_lock
);
1763 /* ---- Inquiry support ---- */
1765 bool hci_discovery_active(struct hci_dev
*hdev
)
1767 struct discovery_state
*discov
= &hdev
->discovery
;
1769 switch (discov
->state
) {
1770 case DISCOVERY_FINDING
:
1771 case DISCOVERY_RESOLVING
:
1779 void hci_discovery_set_state(struct hci_dev
*hdev
, int state
)
1781 BT_DBG("%s state %u -> %u", hdev
->name
, hdev
->discovery
.state
, state
);
1783 if (hdev
->discovery
.state
== state
)
1787 case DISCOVERY_STOPPED
:
1788 if (hdev
->discovery
.state
!= DISCOVERY_STARTING
)
1789 mgmt_discovering(hdev
, 0);
1791 case DISCOVERY_STARTING
:
1793 case DISCOVERY_FINDING
:
1794 mgmt_discovering(hdev
, 1);
1796 case DISCOVERY_RESOLVING
:
1798 case DISCOVERY_STOPPING
:
1802 hdev
->discovery
.state
= state
;
1805 void hci_inquiry_cache_flush(struct hci_dev
*hdev
)
1807 struct discovery_state
*cache
= &hdev
->discovery
;
1808 struct inquiry_entry
*p
, *n
;
1810 list_for_each_entry_safe(p
, n
, &cache
->all
, all
) {
1815 INIT_LIST_HEAD(&cache
->unknown
);
1816 INIT_LIST_HEAD(&cache
->resolve
);
1819 struct inquiry_entry
*hci_inquiry_cache_lookup(struct hci_dev
*hdev
,
1822 struct discovery_state
*cache
= &hdev
->discovery
;
1823 struct inquiry_entry
*e
;
1825 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1827 list_for_each_entry(e
, &cache
->all
, all
) {
1828 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1835 struct inquiry_entry
*hci_inquiry_cache_lookup_unknown(struct hci_dev
*hdev
,
1838 struct discovery_state
*cache
= &hdev
->discovery
;
1839 struct inquiry_entry
*e
;
1841 BT_DBG("cache %p, %pMR", cache
, bdaddr
);
1843 list_for_each_entry(e
, &cache
->unknown
, list
) {
1844 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1851 struct inquiry_entry
*hci_inquiry_cache_lookup_resolve(struct hci_dev
*hdev
,
1855 struct discovery_state
*cache
= &hdev
->discovery
;
1856 struct inquiry_entry
*e
;
1858 BT_DBG("cache %p bdaddr %pMR state %d", cache
, bdaddr
, state
);
1860 list_for_each_entry(e
, &cache
->resolve
, list
) {
1861 if (!bacmp(bdaddr
, BDADDR_ANY
) && e
->name_state
== state
)
1863 if (!bacmp(&e
->data
.bdaddr
, bdaddr
))
1870 void hci_inquiry_cache_update_resolve(struct hci_dev
*hdev
,
1871 struct inquiry_entry
*ie
)
1873 struct discovery_state
*cache
= &hdev
->discovery
;
1874 struct list_head
*pos
= &cache
->resolve
;
1875 struct inquiry_entry
*p
;
1877 list_del(&ie
->list
);
1879 list_for_each_entry(p
, &cache
->resolve
, list
) {
1880 if (p
->name_state
!= NAME_PENDING
&&
1881 abs(p
->data
.rssi
) >= abs(ie
->data
.rssi
))
1886 list_add(&ie
->list
, pos
);
1889 bool hci_inquiry_cache_update(struct hci_dev
*hdev
, struct inquiry_data
*data
,
1890 bool name_known
, bool *ssp
)
1892 struct discovery_state
*cache
= &hdev
->discovery
;
1893 struct inquiry_entry
*ie
;
1895 BT_DBG("cache %p, %pMR", cache
, &data
->bdaddr
);
1897 hci_remove_remote_oob_data(hdev
, &data
->bdaddr
);
1900 *ssp
= data
->ssp_mode
;
1902 ie
= hci_inquiry_cache_lookup(hdev
, &data
->bdaddr
);
1904 if (ie
->data
.ssp_mode
&& ssp
)
1907 if (ie
->name_state
== NAME_NEEDED
&&
1908 data
->rssi
!= ie
->data
.rssi
) {
1909 ie
->data
.rssi
= data
->rssi
;
1910 hci_inquiry_cache_update_resolve(hdev
, ie
);
1916 /* Entry not in the cache. Add new one. */
1917 ie
= kzalloc(sizeof(struct inquiry_entry
), GFP_ATOMIC
);
1921 list_add(&ie
->all
, &cache
->all
);
1924 ie
->name_state
= NAME_KNOWN
;
1926 ie
->name_state
= NAME_NOT_KNOWN
;
1927 list_add(&ie
->list
, &cache
->unknown
);
1931 if (name_known
&& ie
->name_state
!= NAME_KNOWN
&&
1932 ie
->name_state
!= NAME_PENDING
) {
1933 ie
->name_state
= NAME_KNOWN
;
1934 list_del(&ie
->list
);
1937 memcpy(&ie
->data
, data
, sizeof(*data
));
1938 ie
->timestamp
= jiffies
;
1939 cache
->timestamp
= jiffies
;
1941 if (ie
->name_state
== NAME_NOT_KNOWN
)
1947 static int inquiry_cache_dump(struct hci_dev
*hdev
, int num
, __u8
*buf
)
1949 struct discovery_state
*cache
= &hdev
->discovery
;
1950 struct inquiry_info
*info
= (struct inquiry_info
*) buf
;
1951 struct inquiry_entry
*e
;
1954 list_for_each_entry(e
, &cache
->all
, all
) {
1955 struct inquiry_data
*data
= &e
->data
;
1960 bacpy(&info
->bdaddr
, &data
->bdaddr
);
1961 info
->pscan_rep_mode
= data
->pscan_rep_mode
;
1962 info
->pscan_period_mode
= data
->pscan_period_mode
;
1963 info
->pscan_mode
= data
->pscan_mode
;
1964 memcpy(info
->dev_class
, data
->dev_class
, 3);
1965 info
->clock_offset
= data
->clock_offset
;
1971 BT_DBG("cache %p, copied %d", cache
, copied
);
1975 static void hci_inq_req(struct hci_request
*req
, unsigned long opt
)
1977 struct hci_inquiry_req
*ir
= (struct hci_inquiry_req
*) opt
;
1978 struct hci_dev
*hdev
= req
->hdev
;
1979 struct hci_cp_inquiry cp
;
1981 BT_DBG("%s", hdev
->name
);
1983 if (test_bit(HCI_INQUIRY
, &hdev
->flags
))
1987 memcpy(&cp
.lap
, &ir
->lap
, 3);
1988 cp
.length
= ir
->length
;
1989 cp
.num_rsp
= ir
->num_rsp
;
1990 hci_req_add(req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
1993 static int wait_inquiry(void *word
)
1996 return signal_pending(current
);
1999 int hci_inquiry(void __user
*arg
)
2001 __u8 __user
*ptr
= arg
;
2002 struct hci_inquiry_req ir
;
2003 struct hci_dev
*hdev
;
2004 int err
= 0, do_inquiry
= 0, max_rsp
;
2008 if (copy_from_user(&ir
, ptr
, sizeof(ir
)))
2011 hdev
= hci_dev_get(ir
.dev_id
);
2015 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2020 if (hdev
->dev_type
!= HCI_BREDR
) {
2025 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2031 if (inquiry_cache_age(hdev
) > INQUIRY_CACHE_AGE_MAX
||
2032 inquiry_cache_empty(hdev
) || ir
.flags
& IREQ_CACHE_FLUSH
) {
2033 hci_inquiry_cache_flush(hdev
);
2036 hci_dev_unlock(hdev
);
2038 timeo
= ir
.length
* msecs_to_jiffies(2000);
2041 err
= hci_req_sync(hdev
, hci_inq_req
, (unsigned long) &ir
,
2046 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2047 * cleared). If it is interrupted by a signal, return -EINTR.
2049 if (wait_on_bit(&hdev
->flags
, HCI_INQUIRY
, wait_inquiry
,
2050 TASK_INTERRUPTIBLE
))
2054 /* for unlimited number of responses we will use buffer with
2057 max_rsp
= (ir
.num_rsp
== 0) ? 255 : ir
.num_rsp
;
2059 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2060 * copy it to the user space.
2062 buf
= kmalloc(sizeof(struct inquiry_info
) * max_rsp
, GFP_KERNEL
);
2069 ir
.num_rsp
= inquiry_cache_dump(hdev
, max_rsp
, buf
);
2070 hci_dev_unlock(hdev
);
2072 BT_DBG("num_rsp %d", ir
.num_rsp
);
2074 if (!copy_to_user(ptr
, &ir
, sizeof(ir
))) {
2076 if (copy_to_user(ptr
, buf
, sizeof(struct inquiry_info
) *
2089 static int hci_dev_do_open(struct hci_dev
*hdev
)
2093 BT_DBG("%s %p", hdev
->name
, hdev
);
2097 if (test_bit(HCI_UNREGISTER
, &hdev
->dev_flags
)) {
2102 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
2103 /* Check for rfkill but allow the HCI setup stage to
2104 * proceed (which in itself doesn't cause any RF activity).
2106 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
)) {
2111 /* Check for valid public address or a configured static
2112 * random adddress, but let the HCI setup proceed to
2113 * be able to determine if there is a public address
2116 * In case of user channel usage, it is not important
2117 * if a public address or static random address is
2120 * This check is only valid for BR/EDR controllers
2121 * since AMP controllers do not have an address.
2123 if (!test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
2124 hdev
->dev_type
== HCI_BREDR
&&
2125 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2126 !bacmp(&hdev
->static_addr
, BDADDR_ANY
)) {
2127 ret
= -EADDRNOTAVAIL
;
2132 if (test_bit(HCI_UP
, &hdev
->flags
)) {
2137 if (hdev
->open(hdev
)) {
2142 atomic_set(&hdev
->cmd_cnt
, 1);
2143 set_bit(HCI_INIT
, &hdev
->flags
);
2145 if (hdev
->setup
&& test_bit(HCI_SETUP
, &hdev
->dev_flags
))
2146 ret
= hdev
->setup(hdev
);
2149 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
2150 set_bit(HCI_RAW
, &hdev
->flags
);
2152 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
2153 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2154 ret
= __hci_init(hdev
);
2157 clear_bit(HCI_INIT
, &hdev
->flags
);
2161 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
2162 set_bit(HCI_UP
, &hdev
->flags
);
2163 hci_notify(hdev
, HCI_DEV_UP
);
2164 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
) &&
2165 !test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
) &&
2166 hdev
->dev_type
== HCI_BREDR
) {
2168 mgmt_powered(hdev
, 1);
2169 hci_dev_unlock(hdev
);
2172 /* Init failed, cleanup */
2173 flush_work(&hdev
->tx_work
);
2174 flush_work(&hdev
->cmd_work
);
2175 flush_work(&hdev
->rx_work
);
2177 skb_queue_purge(&hdev
->cmd_q
);
2178 skb_queue_purge(&hdev
->rx_q
);
2183 if (hdev
->sent_cmd
) {
2184 kfree_skb(hdev
->sent_cmd
);
2185 hdev
->sent_cmd
= NULL
;
2193 hci_req_unlock(hdev
);
2197 /* ---- HCI ioctl helpers ---- */
2199 int hci_dev_open(__u16 dev
)
2201 struct hci_dev
*hdev
;
2204 hdev
= hci_dev_get(dev
);
2208 /* We need to ensure that no other power on/off work is pending
2209 * before proceeding to call hci_dev_do_open. This is
2210 * particularly important if the setup procedure has not yet
2213 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2214 cancel_delayed_work(&hdev
->power_off
);
2216 /* After this call it is guaranteed that the setup procedure
2217 * has finished. This means that error conditions like RFKILL
2218 * or no valid public or static random address apply.
2220 flush_workqueue(hdev
->req_workqueue
);
2222 err
= hci_dev_do_open(hdev
);
2229 static int hci_dev_do_close(struct hci_dev
*hdev
)
2231 BT_DBG("%s %p", hdev
->name
, hdev
);
2233 cancel_delayed_work(&hdev
->power_off
);
2235 hci_req_cancel(hdev
, ENODEV
);
2238 if (!test_and_clear_bit(HCI_UP
, &hdev
->flags
)) {
2239 del_timer_sync(&hdev
->cmd_timer
);
2240 hci_req_unlock(hdev
);
2244 /* Flush RX and TX works */
2245 flush_work(&hdev
->tx_work
);
2246 flush_work(&hdev
->rx_work
);
2248 if (hdev
->discov_timeout
> 0) {
2249 cancel_delayed_work(&hdev
->discov_off
);
2250 hdev
->discov_timeout
= 0;
2251 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
2252 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
2255 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
2256 cancel_delayed_work(&hdev
->service_cache
);
2258 cancel_delayed_work_sync(&hdev
->le_scan_disable
);
2259 cancel_delayed_work_sync(&hdev
->rpa_expired
);
2262 hci_inquiry_cache_flush(hdev
);
2263 hci_conn_hash_flush(hdev
);
2264 hci_dev_unlock(hdev
);
2266 hci_notify(hdev
, HCI_DEV_DOWN
);
2272 skb_queue_purge(&hdev
->cmd_q
);
2273 atomic_set(&hdev
->cmd_cnt
, 1);
2274 if (!test_bit(HCI_RAW
, &hdev
->flags
) &&
2275 !test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
) &&
2276 test_bit(HCI_QUIRK_RESET_ON_CLOSE
, &hdev
->quirks
)) {
2277 set_bit(HCI_INIT
, &hdev
->flags
);
2278 __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_CMD_TIMEOUT
);
2279 clear_bit(HCI_INIT
, &hdev
->flags
);
2282 /* flush cmd work */
2283 flush_work(&hdev
->cmd_work
);
2286 skb_queue_purge(&hdev
->rx_q
);
2287 skb_queue_purge(&hdev
->cmd_q
);
2288 skb_queue_purge(&hdev
->raw_q
);
2290 /* Drop last sent command */
2291 if (hdev
->sent_cmd
) {
2292 del_timer_sync(&hdev
->cmd_timer
);
2293 kfree_skb(hdev
->sent_cmd
);
2294 hdev
->sent_cmd
= NULL
;
2297 kfree_skb(hdev
->recv_evt
);
2298 hdev
->recv_evt
= NULL
;
2300 /* After this point our queues are empty
2301 * and no tasks are scheduled. */
2306 hdev
->dev_flags
&= ~HCI_PERSISTENT_MASK
;
2308 if (!test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2309 if (hdev
->dev_type
== HCI_BREDR
) {
2311 mgmt_powered(hdev
, 0);
2312 hci_dev_unlock(hdev
);
2316 /* Controller radio is available but is currently powered down */
2317 hdev
->amp_status
= AMP_STATUS_POWERED_DOWN
;
2319 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
2320 memset(hdev
->dev_class
, 0, sizeof(hdev
->dev_class
));
2321 bacpy(&hdev
->random_addr
, BDADDR_ANY
);
2323 hci_req_unlock(hdev
);
2329 int hci_dev_close(__u16 dev
)
2331 struct hci_dev
*hdev
;
2334 hdev
= hci_dev_get(dev
);
2338 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2343 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2344 cancel_delayed_work(&hdev
->power_off
);
2346 err
= hci_dev_do_close(hdev
);
2353 int hci_dev_reset(__u16 dev
)
2355 struct hci_dev
*hdev
;
2358 hdev
= hci_dev_get(dev
);
2364 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2369 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2375 skb_queue_purge(&hdev
->rx_q
);
2376 skb_queue_purge(&hdev
->cmd_q
);
2379 hci_inquiry_cache_flush(hdev
);
2380 hci_conn_hash_flush(hdev
);
2381 hci_dev_unlock(hdev
);
2386 atomic_set(&hdev
->cmd_cnt
, 1);
2387 hdev
->acl_cnt
= 0; hdev
->sco_cnt
= 0; hdev
->le_cnt
= 0;
2389 if (!test_bit(HCI_RAW
, &hdev
->flags
))
2390 ret
= __hci_req_sync(hdev
, hci_reset_req
, 0, HCI_INIT_TIMEOUT
);
2393 hci_req_unlock(hdev
);
2398 int hci_dev_reset_stat(__u16 dev
)
2400 struct hci_dev
*hdev
;
2403 hdev
= hci_dev_get(dev
);
2407 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2412 memset(&hdev
->stat
, 0, sizeof(struct hci_dev_stats
));
2419 int hci_dev_cmd(unsigned int cmd
, void __user
*arg
)
2421 struct hci_dev
*hdev
;
2422 struct hci_dev_req dr
;
2425 if (copy_from_user(&dr
, arg
, sizeof(dr
)))
2428 hdev
= hci_dev_get(dr
.dev_id
);
2432 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
2437 if (hdev
->dev_type
!= HCI_BREDR
) {
2442 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
2449 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2454 if (!lmp_encrypt_capable(hdev
)) {
2459 if (!test_bit(HCI_AUTH
, &hdev
->flags
)) {
2460 /* Auth must be enabled first */
2461 err
= hci_req_sync(hdev
, hci_auth_req
, dr
.dev_opt
,
2467 err
= hci_req_sync(hdev
, hci_encrypt_req
, dr
.dev_opt
,
2472 err
= hci_req_sync(hdev
, hci_scan_req
, dr
.dev_opt
,
2477 err
= hci_req_sync(hdev
, hci_linkpol_req
, dr
.dev_opt
,
2481 case HCISETLINKMODE
:
2482 hdev
->link_mode
= ((__u16
) dr
.dev_opt
) &
2483 (HCI_LM_MASTER
| HCI_LM_ACCEPT
);
2487 hdev
->pkt_type
= (__u16
) dr
.dev_opt
;
2491 hdev
->acl_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2492 hdev
->acl_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2496 hdev
->sco_mtu
= *((__u16
*) &dr
.dev_opt
+ 1);
2497 hdev
->sco_pkts
= *((__u16
*) &dr
.dev_opt
+ 0);
2510 int hci_get_dev_list(void __user
*arg
)
2512 struct hci_dev
*hdev
;
2513 struct hci_dev_list_req
*dl
;
2514 struct hci_dev_req
*dr
;
2515 int n
= 0, size
, err
;
2518 if (get_user(dev_num
, (__u16 __user
*) arg
))
2521 if (!dev_num
|| dev_num
> (PAGE_SIZE
* 2) / sizeof(*dr
))
2524 size
= sizeof(*dl
) + dev_num
* sizeof(*dr
);
2526 dl
= kzalloc(size
, GFP_KERNEL
);
2532 read_lock(&hci_dev_list_lock
);
2533 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
2534 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2535 cancel_delayed_work(&hdev
->power_off
);
2537 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2538 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2540 (dr
+ n
)->dev_id
= hdev
->id
;
2541 (dr
+ n
)->dev_opt
= hdev
->flags
;
2546 read_unlock(&hci_dev_list_lock
);
2549 size
= sizeof(*dl
) + n
* sizeof(*dr
);
2551 err
= copy_to_user(arg
, dl
, size
);
2554 return err
? -EFAULT
: 0;
2557 int hci_get_dev_info(void __user
*arg
)
2559 struct hci_dev
*hdev
;
2560 struct hci_dev_info di
;
2563 if (copy_from_user(&di
, arg
, sizeof(di
)))
2566 hdev
= hci_dev_get(di
.dev_id
);
2570 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
))
2571 cancel_delayed_work_sync(&hdev
->power_off
);
2573 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
2574 set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
2576 strcpy(di
.name
, hdev
->name
);
2577 di
.bdaddr
= hdev
->bdaddr
;
2578 di
.type
= (hdev
->bus
& 0x0f) | ((hdev
->dev_type
& 0x03) << 4);
2579 di
.flags
= hdev
->flags
;
2580 di
.pkt_type
= hdev
->pkt_type
;
2581 if (lmp_bredr_capable(hdev
)) {
2582 di
.acl_mtu
= hdev
->acl_mtu
;
2583 di
.acl_pkts
= hdev
->acl_pkts
;
2584 di
.sco_mtu
= hdev
->sco_mtu
;
2585 di
.sco_pkts
= hdev
->sco_pkts
;
2587 di
.acl_mtu
= hdev
->le_mtu
;
2588 di
.acl_pkts
= hdev
->le_pkts
;
2592 di
.link_policy
= hdev
->link_policy
;
2593 di
.link_mode
= hdev
->link_mode
;
2595 memcpy(&di
.stat
, &hdev
->stat
, sizeof(di
.stat
));
2596 memcpy(&di
.features
, &hdev
->features
, sizeof(di
.features
));
2598 if (copy_to_user(arg
, &di
, sizeof(di
)))
2606 /* ---- Interface to HCI drivers ---- */
2608 static int hci_rfkill_set_block(void *data
, bool blocked
)
2610 struct hci_dev
*hdev
= data
;
2612 BT_DBG("%p name %s blocked %d", hdev
, hdev
->name
, blocked
);
2614 if (test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
))
2618 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2619 if (!test_bit(HCI_SETUP
, &hdev
->dev_flags
))
2620 hci_dev_do_close(hdev
);
2622 clear_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
2628 static const struct rfkill_ops hci_rfkill_ops
= {
2629 .set_block
= hci_rfkill_set_block
,
2632 static void hci_power_on(struct work_struct
*work
)
2634 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
, power_on
);
2637 BT_DBG("%s", hdev
->name
);
2639 err
= hci_dev_do_open(hdev
);
2641 mgmt_set_powered_failed(hdev
, err
);
2645 /* During the HCI setup phase, a few error conditions are
2646 * ignored and they need to be checked now. If they are still
2647 * valid, it is important to turn the device back off.
2649 if (test_bit(HCI_RFKILLED
, &hdev
->dev_flags
) ||
2650 (hdev
->dev_type
== HCI_BREDR
&&
2651 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) &&
2652 !bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
2653 clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
2654 hci_dev_do_close(hdev
);
2655 } else if (test_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
2656 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
2657 HCI_AUTO_OFF_TIMEOUT
);
2660 if (test_and_clear_bit(HCI_SETUP
, &hdev
->dev_flags
))
2661 mgmt_index_added(hdev
);
2664 static void hci_power_off(struct work_struct
*work
)
2666 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
2669 BT_DBG("%s", hdev
->name
);
2671 hci_dev_do_close(hdev
);
2674 static void hci_discov_off(struct work_struct
*work
)
2676 struct hci_dev
*hdev
;
2678 hdev
= container_of(work
, struct hci_dev
, discov_off
.work
);
2680 BT_DBG("%s", hdev
->name
);
2682 mgmt_discoverable_timeout(hdev
);
2685 void hci_uuids_clear(struct hci_dev
*hdev
)
2687 struct bt_uuid
*uuid
, *tmp
;
2689 list_for_each_entry_safe(uuid
, tmp
, &hdev
->uuids
, list
) {
2690 list_del(&uuid
->list
);
2695 void hci_link_keys_clear(struct hci_dev
*hdev
)
2697 struct list_head
*p
, *n
;
2699 list_for_each_safe(p
, n
, &hdev
->link_keys
) {
2700 struct link_key
*key
;
2702 key
= list_entry(p
, struct link_key
, list
);
2709 void hci_smp_ltks_clear(struct hci_dev
*hdev
)
2711 struct smp_ltk
*k
, *tmp
;
2713 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
2719 void hci_smp_irks_clear(struct hci_dev
*hdev
)
2721 struct smp_irk
*k
, *tmp
;
2723 list_for_each_entry_safe(k
, tmp
, &hdev
->identity_resolving_keys
, list
) {
2729 struct link_key
*hci_find_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2733 list_for_each_entry(k
, &hdev
->link_keys
, list
)
2734 if (bacmp(bdaddr
, &k
->bdaddr
) == 0)
2740 static bool hci_persistent_key(struct hci_dev
*hdev
, struct hci_conn
*conn
,
2741 u8 key_type
, u8 old_key_type
)
2744 if (key_type
< 0x03)
2747 /* Debug keys are insecure so don't store them persistently */
2748 if (key_type
== HCI_LK_DEBUG_COMBINATION
)
2751 /* Changed combination key and there's no previous one */
2752 if (key_type
== HCI_LK_CHANGED_COMBINATION
&& old_key_type
== 0xff)
2755 /* Security mode 3 case */
2759 /* Neither local nor remote side had no-bonding as requirement */
2760 if (conn
->auth_type
> 0x01 && conn
->remote_auth
> 0x01)
2763 /* Local side had dedicated bonding as requirement */
2764 if (conn
->auth_type
== 0x02 || conn
->auth_type
== 0x03)
2767 /* Remote side had dedicated bonding as requirement */
2768 if (conn
->remote_auth
== 0x02 || conn
->remote_auth
== 0x03)
2771 /* If none of the above criteria match, then don't store the key
2776 static bool ltk_type_master(u8 type
)
2778 if (type
== HCI_SMP_STK
|| type
== HCI_SMP_LTK
)
2784 struct smp_ltk
*hci_find_ltk(struct hci_dev
*hdev
, __le16 ediv
, u8 rand
[8],
2789 list_for_each_entry(k
, &hdev
->long_term_keys
, list
) {
2790 if (k
->ediv
!= ediv
||
2791 memcmp(rand
, k
->rand
, sizeof(k
->rand
)))
2794 if (ltk_type_master(k
->type
) != master
)
2803 struct smp_ltk
*hci_find_ltk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2804 u8 addr_type
, bool master
)
2808 list_for_each_entry(k
, &hdev
->long_term_keys
, list
)
2809 if (addr_type
== k
->bdaddr_type
&&
2810 bacmp(bdaddr
, &k
->bdaddr
) == 0 &&
2811 ltk_type_master(k
->type
) == master
)
2817 struct smp_irk
*hci_find_irk_by_rpa(struct hci_dev
*hdev
, bdaddr_t
*rpa
)
2819 struct smp_irk
*irk
;
2821 list_for_each_entry(irk
, &hdev
->identity_resolving_keys
, list
) {
2822 if (!bacmp(&irk
->rpa
, rpa
))
2826 list_for_each_entry(irk
, &hdev
->identity_resolving_keys
, list
) {
2827 if (smp_irk_matches(hdev
->tfm_aes
, irk
->val
, rpa
)) {
2828 bacpy(&irk
->rpa
, rpa
);
2836 struct smp_irk
*hci_find_irk_by_addr(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2839 struct smp_irk
*irk
;
2841 /* Identity Address must be public or static random */
2842 if (addr_type
== ADDR_LE_DEV_RANDOM
&& (bdaddr
->b
[5] & 0xc0) != 0xc0)
2845 list_for_each_entry(irk
, &hdev
->identity_resolving_keys
, list
) {
2846 if (addr_type
== irk
->addr_type
&&
2847 bacmp(bdaddr
, &irk
->bdaddr
) == 0)
2854 int hci_add_link_key(struct hci_dev
*hdev
, struct hci_conn
*conn
, int new_key
,
2855 bdaddr_t
*bdaddr
, u8
*val
, u8 type
, u8 pin_len
)
2857 struct link_key
*key
, *old_key
;
2861 old_key
= hci_find_link_key(hdev
, bdaddr
);
2863 old_key_type
= old_key
->type
;
2866 old_key_type
= conn
? conn
->key_type
: 0xff;
2867 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2870 list_add(&key
->list
, &hdev
->link_keys
);
2873 BT_DBG("%s key for %pMR type %u", hdev
->name
, bdaddr
, type
);
2875 /* Some buggy controller combinations generate a changed
2876 * combination key for legacy pairing even when there's no
2878 if (type
== HCI_LK_CHANGED_COMBINATION
&&
2879 (!conn
|| conn
->remote_auth
== 0xff) && old_key_type
== 0xff) {
2880 type
= HCI_LK_COMBINATION
;
2882 conn
->key_type
= type
;
2885 bacpy(&key
->bdaddr
, bdaddr
);
2886 memcpy(key
->val
, val
, HCI_LINK_KEY_SIZE
);
2887 key
->pin_len
= pin_len
;
2889 if (type
== HCI_LK_CHANGED_COMBINATION
)
2890 key
->type
= old_key_type
;
2897 persistent
= hci_persistent_key(hdev
, conn
, type
, old_key_type
);
2899 mgmt_new_link_key(hdev
, key
, persistent
);
2902 conn
->flush_key
= !persistent
;
2907 struct smp_ltk
*hci_add_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2908 u8 addr_type
, u8 type
, u8 authenticated
,
2909 u8 tk
[16], u8 enc_size
, __le16 ediv
, u8 rand
[8])
2911 struct smp_ltk
*key
, *old_key
;
2912 bool master
= ltk_type_master(type
);
2914 old_key
= hci_find_ltk_by_addr(hdev
, bdaddr
, addr_type
, master
);
2918 key
= kzalloc(sizeof(*key
), GFP_KERNEL
);
2921 list_add(&key
->list
, &hdev
->long_term_keys
);
2924 bacpy(&key
->bdaddr
, bdaddr
);
2925 key
->bdaddr_type
= addr_type
;
2926 memcpy(key
->val
, tk
, sizeof(key
->val
));
2927 key
->authenticated
= authenticated
;
2929 key
->enc_size
= enc_size
;
2931 memcpy(key
->rand
, rand
, sizeof(key
->rand
));
2936 struct smp_irk
*hci_add_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2937 u8 addr_type
, u8 val
[16], bdaddr_t
*rpa
)
2939 struct smp_irk
*irk
;
2941 irk
= hci_find_irk_by_addr(hdev
, bdaddr
, addr_type
);
2943 irk
= kzalloc(sizeof(*irk
), GFP_KERNEL
);
2947 bacpy(&irk
->bdaddr
, bdaddr
);
2948 irk
->addr_type
= addr_type
;
2950 list_add(&irk
->list
, &hdev
->identity_resolving_keys
);
2953 memcpy(irk
->val
, val
, 16);
2954 bacpy(&irk
->rpa
, rpa
);
2959 int hci_remove_link_key(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
2961 struct link_key
*key
;
2963 key
= hci_find_link_key(hdev
, bdaddr
);
2967 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2969 list_del(&key
->list
);
2975 int hci_remove_ltk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 bdaddr_type
)
2977 struct smp_ltk
*k
, *tmp
;
2980 list_for_each_entry_safe(k
, tmp
, &hdev
->long_term_keys
, list
) {
2981 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->bdaddr_type
!= bdaddr_type
)
2984 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
2991 return removed
? 0 : -ENOENT
;
2994 void hci_remove_irk(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 addr_type
)
2996 struct smp_irk
*k
, *tmp
;
2998 list_for_each_entry_safe(k
, tmp
, &hdev
->identity_resolving_keys
, list
) {
2999 if (bacmp(bdaddr
, &k
->bdaddr
) || k
->addr_type
!= addr_type
)
3002 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3009 /* HCI command timer function */
3010 static void hci_cmd_timeout(unsigned long arg
)
3012 struct hci_dev
*hdev
= (void *) arg
;
3014 if (hdev
->sent_cmd
) {
3015 struct hci_command_hdr
*sent
= (void *) hdev
->sent_cmd
->data
;
3016 u16 opcode
= __le16_to_cpu(sent
->opcode
);
3018 BT_ERR("%s command 0x%4.4x tx timeout", hdev
->name
, opcode
);
3020 BT_ERR("%s command tx timeout", hdev
->name
);
3023 atomic_set(&hdev
->cmd_cnt
, 1);
3024 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3027 struct oob_data
*hci_find_remote_oob_data(struct hci_dev
*hdev
,
3030 struct oob_data
*data
;
3032 list_for_each_entry(data
, &hdev
->remote_oob_data
, list
)
3033 if (bacmp(bdaddr
, &data
->bdaddr
) == 0)
3039 int hci_remove_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
3041 struct oob_data
*data
;
3043 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3047 BT_DBG("%s removing %pMR", hdev
->name
, bdaddr
);
3049 list_del(&data
->list
);
3055 void hci_remote_oob_data_clear(struct hci_dev
*hdev
)
3057 struct oob_data
*data
, *n
;
3059 list_for_each_entry_safe(data
, n
, &hdev
->remote_oob_data
, list
) {
3060 list_del(&data
->list
);
3065 int hci_add_remote_oob_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3066 u8
*hash
, u8
*randomizer
)
3068 struct oob_data
*data
;
3070 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3072 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
3076 bacpy(&data
->bdaddr
, bdaddr
);
3077 list_add(&data
->list
, &hdev
->remote_oob_data
);
3080 memcpy(data
->hash192
, hash
, sizeof(data
->hash192
));
3081 memcpy(data
->randomizer192
, randomizer
, sizeof(data
->randomizer192
));
3083 memset(data
->hash256
, 0, sizeof(data
->hash256
));
3084 memset(data
->randomizer256
, 0, sizeof(data
->randomizer256
));
3086 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
3091 int hci_add_remote_oob_ext_data(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
3092 u8
*hash192
, u8
*randomizer192
,
3093 u8
*hash256
, u8
*randomizer256
)
3095 struct oob_data
*data
;
3097 data
= hci_find_remote_oob_data(hdev
, bdaddr
);
3099 data
= kmalloc(sizeof(*data
), GFP_KERNEL
);
3103 bacpy(&data
->bdaddr
, bdaddr
);
3104 list_add(&data
->list
, &hdev
->remote_oob_data
);
3107 memcpy(data
->hash192
, hash192
, sizeof(data
->hash192
));
3108 memcpy(data
->randomizer192
, randomizer192
, sizeof(data
->randomizer192
));
3110 memcpy(data
->hash256
, hash256
, sizeof(data
->hash256
));
3111 memcpy(data
->randomizer256
, randomizer256
, sizeof(data
->randomizer256
));
3113 BT_DBG("%s for %pMR", hdev
->name
, bdaddr
);
3118 struct bdaddr_list
*hci_blacklist_lookup(struct hci_dev
*hdev
,
3119 bdaddr_t
*bdaddr
, u8 type
)
3121 struct bdaddr_list
*b
;
3123 list_for_each_entry(b
, &hdev
->blacklist
, list
) {
3124 if (!bacmp(&b
->bdaddr
, bdaddr
) && b
->bdaddr_type
== type
)
3131 void hci_blacklist_clear(struct hci_dev
*hdev
)
3133 struct list_head
*p
, *n
;
3135 list_for_each_safe(p
, n
, &hdev
->blacklist
) {
3136 struct bdaddr_list
*b
= list_entry(p
, struct bdaddr_list
, list
);
3143 int hci_blacklist_add(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3145 struct bdaddr_list
*entry
;
3147 if (!bacmp(bdaddr
, BDADDR_ANY
))
3150 if (hci_blacklist_lookup(hdev
, bdaddr
, type
))
3153 entry
= kzalloc(sizeof(struct bdaddr_list
), GFP_KERNEL
);
3157 bacpy(&entry
->bdaddr
, bdaddr
);
3158 entry
->bdaddr_type
= type
;
3160 list_add(&entry
->list
, &hdev
->blacklist
);
3162 return mgmt_device_blocked(hdev
, bdaddr
, type
);
3165 int hci_blacklist_del(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3167 struct bdaddr_list
*entry
;
3169 if (!bacmp(bdaddr
, BDADDR_ANY
)) {
3170 hci_blacklist_clear(hdev
);
3174 entry
= hci_blacklist_lookup(hdev
, bdaddr
, type
);
3178 list_del(&entry
->list
);
3181 return mgmt_device_unblocked(hdev
, bdaddr
, type
);
3184 /* This function requires the caller holds hdev->lock */
3185 struct hci_conn_params
*hci_conn_params_lookup(struct hci_dev
*hdev
,
3186 bdaddr_t
*addr
, u8 addr_type
)
3188 struct hci_conn_params
*params
;
3190 list_for_each_entry(params
, &hdev
->le_conn_params
, list
) {
3191 if (bacmp(¶ms
->addr
, addr
) == 0 &&
3192 params
->addr_type
== addr_type
) {
3200 /* This function requires the caller holds hdev->lock */
3201 void hci_conn_params_add(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
,
3202 u16 conn_min_interval
, u16 conn_max_interval
)
3204 struct hci_conn_params
*params
;
3206 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
3208 params
->conn_min_interval
= conn_min_interval
;
3209 params
->conn_max_interval
= conn_max_interval
;
3213 params
= kzalloc(sizeof(*params
), GFP_KERNEL
);
3215 BT_ERR("Out of memory");
3219 bacpy(¶ms
->addr
, addr
);
3220 params
->addr_type
= addr_type
;
3221 params
->conn_min_interval
= conn_min_interval
;
3222 params
->conn_max_interval
= conn_max_interval
;
3224 list_add(¶ms
->list
, &hdev
->le_conn_params
);
3226 BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
3227 "conn_max_interval 0x%.4x", addr
, addr_type
, conn_min_interval
,
3231 /* This function requires the caller holds hdev->lock */
3232 void hci_conn_params_del(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 addr_type
)
3234 struct hci_conn_params
*params
;
3236 params
= hci_conn_params_lookup(hdev
, addr
, addr_type
);
3240 list_del(¶ms
->list
);
3243 BT_DBG("addr %pMR (type %u)", addr
, addr_type
);
3246 /* This function requires the caller holds hdev->lock */
3247 void hci_conn_params_clear(struct hci_dev
*hdev
)
3249 struct hci_conn_params
*params
, *tmp
;
3251 list_for_each_entry_safe(params
, tmp
, &hdev
->le_conn_params
, list
) {
3252 list_del(¶ms
->list
);
3256 BT_DBG("All LE connection parameters were removed");
3259 static void inquiry_complete(struct hci_dev
*hdev
, u8 status
)
3262 BT_ERR("Failed to start inquiry: status %d", status
);
3265 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3266 hci_dev_unlock(hdev
);
3271 static void le_scan_disable_work_complete(struct hci_dev
*hdev
, u8 status
)
3273 /* General inquiry access code (GIAC) */
3274 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3275 struct hci_request req
;
3276 struct hci_cp_inquiry cp
;
3280 BT_ERR("Failed to disable LE scanning: status %d", status
);
3284 switch (hdev
->discovery
.type
) {
3285 case DISCOV_TYPE_LE
:
3287 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3288 hci_dev_unlock(hdev
);
3291 case DISCOV_TYPE_INTERLEAVED
:
3292 hci_req_init(&req
, hdev
);
3294 memset(&cp
, 0, sizeof(cp
));
3295 memcpy(&cp
.lap
, lap
, sizeof(cp
.lap
));
3296 cp
.length
= DISCOV_INTERLEAVED_INQUIRY_LEN
;
3297 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(cp
), &cp
);
3301 hci_inquiry_cache_flush(hdev
);
3303 err
= hci_req_run(&req
, inquiry_complete
);
3305 BT_ERR("Inquiry request failed: err %d", err
);
3306 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3309 hci_dev_unlock(hdev
);
3314 static void le_scan_disable_work(struct work_struct
*work
)
3316 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
3317 le_scan_disable
.work
);
3318 struct hci_cp_le_set_scan_enable cp
;
3319 struct hci_request req
;
3322 BT_DBG("%s", hdev
->name
);
3324 hci_req_init(&req
, hdev
);
3326 memset(&cp
, 0, sizeof(cp
));
3327 cp
.enable
= LE_SCAN_DISABLE
;
3328 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(cp
), &cp
);
3330 err
= hci_req_run(&req
, le_scan_disable_work_complete
);
3332 BT_ERR("Disable LE scanning request failed: err %d", err
);
3335 int hci_update_random_address(struct hci_request
*req
, u8
*own_addr_type
)
3337 struct hci_dev
*hdev
= req
->hdev
;
3340 /* If privacy is enabled use a resolvable private address. If
3341 * the current RPA has expired or there's something else than an
3342 * RPA currently in use regenerate a new one.
3344 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
)) {
3348 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
3350 if (!test_and_clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
) &&
3351 hci_bdaddr_is_rpa(&hdev
->random_addr
, ADDR_LE_DEV_RANDOM
))
3354 err
= smp_generate_rpa(hdev
->tfm_aes
, hdev
->irk
, &rpa
);
3356 BT_ERR("%s failed to generate new RPA", hdev
->name
);
3360 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6, &rpa
);
3362 to
= msecs_to_jiffies(hdev
->rpa_timeout
* 1000);
3363 queue_delayed_work(hdev
->workqueue
, &hdev
->rpa_expired
, to
);
3368 /* If forcing static address is in use or there is no public
3369 * address use the static address as random address (but skip
3370 * the HCI command if the current random address is already the
3373 if (test_bit(HCI_FORCE_STATIC_ADDR
, &hdev
->dev_flags
) ||
3374 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
3375 *own_addr_type
= ADDR_LE_DEV_RANDOM
;
3376 if (bacmp(&hdev
->static_addr
, &hdev
->random_addr
))
3377 hci_req_add(req
, HCI_OP_LE_SET_RANDOM_ADDR
, 6,
3378 &hdev
->static_addr
);
3382 /* Neither privacy nor static address is being used so use a
3385 *own_addr_type
= ADDR_LE_DEV_PUBLIC
;
3390 /* Alloc HCI device */
3391 struct hci_dev
*hci_alloc_dev(void)
3393 struct hci_dev
*hdev
;
3395 hdev
= kzalloc(sizeof(struct hci_dev
), GFP_KERNEL
);
3399 hdev
->pkt_type
= (HCI_DM1
| HCI_DH1
| HCI_HV1
);
3400 hdev
->esco_type
= (ESCO_HV1
);
3401 hdev
->link_mode
= (HCI_LM_ACCEPT
);
3402 hdev
->num_iac
= 0x01; /* One IAC support is mandatory */
3403 hdev
->io_capability
= 0x03; /* No Input No Output */
3404 hdev
->inq_tx_power
= HCI_TX_POWER_INVALID
;
3405 hdev
->adv_tx_power
= HCI_TX_POWER_INVALID
;
3407 hdev
->sniff_max_interval
= 800;
3408 hdev
->sniff_min_interval
= 80;
3410 hdev
->le_adv_channel_map
= 0x07;
3411 hdev
->le_scan_interval
= 0x0060;
3412 hdev
->le_scan_window
= 0x0030;
3413 hdev
->le_conn_min_interval
= 0x0028;
3414 hdev
->le_conn_max_interval
= 0x0038;
3416 hdev
->rpa_timeout
= HCI_DEFAULT_RPA_TIMEOUT
;
3418 mutex_init(&hdev
->lock
);
3419 mutex_init(&hdev
->req_lock
);
3421 INIT_LIST_HEAD(&hdev
->mgmt_pending
);
3422 INIT_LIST_HEAD(&hdev
->blacklist
);
3423 INIT_LIST_HEAD(&hdev
->uuids
);
3424 INIT_LIST_HEAD(&hdev
->link_keys
);
3425 INIT_LIST_HEAD(&hdev
->long_term_keys
);
3426 INIT_LIST_HEAD(&hdev
->identity_resolving_keys
);
3427 INIT_LIST_HEAD(&hdev
->remote_oob_data
);
3428 INIT_LIST_HEAD(&hdev
->le_conn_params
);
3429 INIT_LIST_HEAD(&hdev
->conn_hash
.list
);
3431 INIT_WORK(&hdev
->rx_work
, hci_rx_work
);
3432 INIT_WORK(&hdev
->cmd_work
, hci_cmd_work
);
3433 INIT_WORK(&hdev
->tx_work
, hci_tx_work
);
3434 INIT_WORK(&hdev
->power_on
, hci_power_on
);
3436 INIT_DELAYED_WORK(&hdev
->power_off
, hci_power_off
);
3437 INIT_DELAYED_WORK(&hdev
->discov_off
, hci_discov_off
);
3438 INIT_DELAYED_WORK(&hdev
->le_scan_disable
, le_scan_disable_work
);
3440 skb_queue_head_init(&hdev
->rx_q
);
3441 skb_queue_head_init(&hdev
->cmd_q
);
3442 skb_queue_head_init(&hdev
->raw_q
);
3444 init_waitqueue_head(&hdev
->req_wait_q
);
3446 setup_timer(&hdev
->cmd_timer
, hci_cmd_timeout
, (unsigned long) hdev
);
3448 hci_init_sysfs(hdev
);
3449 discovery_init(hdev
);
3453 EXPORT_SYMBOL(hci_alloc_dev
);
3455 /* Free HCI device */
3456 void hci_free_dev(struct hci_dev
*hdev
)
3458 /* will free via device release */
3459 put_device(&hdev
->dev
);
3461 EXPORT_SYMBOL(hci_free_dev
);
3463 /* Register HCI device */
3464 int hci_register_dev(struct hci_dev
*hdev
)
3468 if (!hdev
->open
|| !hdev
->close
)
3471 /* Do not allow HCI_AMP devices to register at index 0,
3472 * so the index can be used as the AMP controller ID.
3474 switch (hdev
->dev_type
) {
3476 id
= ida_simple_get(&hci_index_ida
, 0, 0, GFP_KERNEL
);
3479 id
= ida_simple_get(&hci_index_ida
, 1, 0, GFP_KERNEL
);
3488 sprintf(hdev
->name
, "hci%d", id
);
3491 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3493 hdev
->workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3494 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3495 if (!hdev
->workqueue
) {
3500 hdev
->req_workqueue
= alloc_workqueue("%s", WQ_HIGHPRI
| WQ_UNBOUND
|
3501 WQ_MEM_RECLAIM
, 1, hdev
->name
);
3502 if (!hdev
->req_workqueue
) {
3503 destroy_workqueue(hdev
->workqueue
);
3508 if (!IS_ERR_OR_NULL(bt_debugfs
))
3509 hdev
->debugfs
= debugfs_create_dir(hdev
->name
, bt_debugfs
);
3511 dev_set_name(&hdev
->dev
, "%s", hdev
->name
);
3513 hdev
->tfm_aes
= crypto_alloc_blkcipher("ecb(aes)", 0,
3515 if (IS_ERR(hdev
->tfm_aes
)) {
3516 BT_ERR("Unable to create crypto context");
3517 error
= PTR_ERR(hdev
->tfm_aes
);
3518 hdev
->tfm_aes
= NULL
;
3522 error
= device_add(&hdev
->dev
);
3526 hdev
->rfkill
= rfkill_alloc(hdev
->name
, &hdev
->dev
,
3527 RFKILL_TYPE_BLUETOOTH
, &hci_rfkill_ops
,
3530 if (rfkill_register(hdev
->rfkill
) < 0) {
3531 rfkill_destroy(hdev
->rfkill
);
3532 hdev
->rfkill
= NULL
;
3536 if (hdev
->rfkill
&& rfkill_blocked(hdev
->rfkill
))
3537 set_bit(HCI_RFKILLED
, &hdev
->dev_flags
);
3539 set_bit(HCI_SETUP
, &hdev
->dev_flags
);
3540 set_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
);
3542 if (hdev
->dev_type
== HCI_BREDR
) {
3543 /* Assume BR/EDR support until proven otherwise (such as
3544 * through reading supported features during init.
3546 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
3549 write_lock(&hci_dev_list_lock
);
3550 list_add(&hdev
->list
, &hci_dev_list
);
3551 write_unlock(&hci_dev_list_lock
);
3553 hci_notify(hdev
, HCI_DEV_REG
);
3556 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
3561 crypto_free_blkcipher(hdev
->tfm_aes
);
3563 destroy_workqueue(hdev
->workqueue
);
3564 destroy_workqueue(hdev
->req_workqueue
);
3566 ida_simple_remove(&hci_index_ida
, hdev
->id
);
3570 EXPORT_SYMBOL(hci_register_dev
);
3572 /* Unregister HCI device */
3573 void hci_unregister_dev(struct hci_dev
*hdev
)
3577 BT_DBG("%p name %s bus %d", hdev
, hdev
->name
, hdev
->bus
);
3579 set_bit(HCI_UNREGISTER
, &hdev
->dev_flags
);
3583 write_lock(&hci_dev_list_lock
);
3584 list_del(&hdev
->list
);
3585 write_unlock(&hci_dev_list_lock
);
3587 hci_dev_do_close(hdev
);
3589 for (i
= 0; i
< NUM_REASSEMBLY
; i
++)
3590 kfree_skb(hdev
->reassembly
[i
]);
3592 cancel_work_sync(&hdev
->power_on
);
3594 if (!test_bit(HCI_INIT
, &hdev
->flags
) &&
3595 !test_bit(HCI_SETUP
, &hdev
->dev_flags
)) {
3597 mgmt_index_removed(hdev
);
3598 hci_dev_unlock(hdev
);
3601 /* mgmt_index_removed should take care of emptying the
3603 BUG_ON(!list_empty(&hdev
->mgmt_pending
));
3605 hci_notify(hdev
, HCI_DEV_UNREG
);
3608 rfkill_unregister(hdev
->rfkill
);
3609 rfkill_destroy(hdev
->rfkill
);
3613 crypto_free_blkcipher(hdev
->tfm_aes
);
3615 device_del(&hdev
->dev
);
3617 debugfs_remove_recursive(hdev
->debugfs
);
3619 destroy_workqueue(hdev
->workqueue
);
3620 destroy_workqueue(hdev
->req_workqueue
);
3623 hci_blacklist_clear(hdev
);
3624 hci_uuids_clear(hdev
);
3625 hci_link_keys_clear(hdev
);
3626 hci_smp_ltks_clear(hdev
);
3627 hci_smp_irks_clear(hdev
);
3628 hci_remote_oob_data_clear(hdev
);
3629 hci_conn_params_clear(hdev
);
3630 hci_dev_unlock(hdev
);
3634 ida_simple_remove(&hci_index_ida
, id
);
3636 EXPORT_SYMBOL(hci_unregister_dev
);
3638 /* Suspend HCI device */
3639 int hci_suspend_dev(struct hci_dev
*hdev
)
3641 hci_notify(hdev
, HCI_DEV_SUSPEND
);
3644 EXPORT_SYMBOL(hci_suspend_dev
);
3646 /* Resume HCI device */
3647 int hci_resume_dev(struct hci_dev
*hdev
)
3649 hci_notify(hdev
, HCI_DEV_RESUME
);
3652 EXPORT_SYMBOL(hci_resume_dev
);
3654 /* Receive frame from HCI drivers */
3655 int hci_recv_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3657 if (!hdev
|| (!test_bit(HCI_UP
, &hdev
->flags
)
3658 && !test_bit(HCI_INIT
, &hdev
->flags
))) {
3664 bt_cb(skb
)->incoming
= 1;
3667 __net_timestamp(skb
);
3669 skb_queue_tail(&hdev
->rx_q
, skb
);
3670 queue_work(hdev
->workqueue
, &hdev
->rx_work
);
3674 EXPORT_SYMBOL(hci_recv_frame
);
3676 static int hci_reassembly(struct hci_dev
*hdev
, int type
, void *data
,
3677 int count
, __u8 index
)
3682 struct sk_buff
*skb
;
3683 struct bt_skb_cb
*scb
;
3685 if ((type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
) ||
3686 index
>= NUM_REASSEMBLY
)
3689 skb
= hdev
->reassembly
[index
];
3693 case HCI_ACLDATA_PKT
:
3694 len
= HCI_MAX_FRAME_SIZE
;
3695 hlen
= HCI_ACL_HDR_SIZE
;
3698 len
= HCI_MAX_EVENT_SIZE
;
3699 hlen
= HCI_EVENT_HDR_SIZE
;
3701 case HCI_SCODATA_PKT
:
3702 len
= HCI_MAX_SCO_SIZE
;
3703 hlen
= HCI_SCO_HDR_SIZE
;
3707 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3711 scb
= (void *) skb
->cb
;
3713 scb
->pkt_type
= type
;
3715 hdev
->reassembly
[index
] = skb
;
3719 scb
= (void *) skb
->cb
;
3720 len
= min_t(uint
, scb
->expect
, count
);
3722 memcpy(skb_put(skb
, len
), data
, len
);
3731 if (skb
->len
== HCI_EVENT_HDR_SIZE
) {
3732 struct hci_event_hdr
*h
= hci_event_hdr(skb
);
3733 scb
->expect
= h
->plen
;
3735 if (skb_tailroom(skb
) < scb
->expect
) {
3737 hdev
->reassembly
[index
] = NULL
;
3743 case HCI_ACLDATA_PKT
:
3744 if (skb
->len
== HCI_ACL_HDR_SIZE
) {
3745 struct hci_acl_hdr
*h
= hci_acl_hdr(skb
);
3746 scb
->expect
= __le16_to_cpu(h
->dlen
);
3748 if (skb_tailroom(skb
) < scb
->expect
) {
3750 hdev
->reassembly
[index
] = NULL
;
3756 case HCI_SCODATA_PKT
:
3757 if (skb
->len
== HCI_SCO_HDR_SIZE
) {
3758 struct hci_sco_hdr
*h
= hci_sco_hdr(skb
);
3759 scb
->expect
= h
->dlen
;
3761 if (skb_tailroom(skb
) < scb
->expect
) {
3763 hdev
->reassembly
[index
] = NULL
;
3770 if (scb
->expect
== 0) {
3771 /* Complete frame */
3773 bt_cb(skb
)->pkt_type
= type
;
3774 hci_recv_frame(hdev
, skb
);
3776 hdev
->reassembly
[index
] = NULL
;
3784 int hci_recv_fragment(struct hci_dev
*hdev
, int type
, void *data
, int count
)
3788 if (type
< HCI_ACLDATA_PKT
|| type
> HCI_EVENT_PKT
)
3792 rem
= hci_reassembly(hdev
, type
, data
, count
, type
- 1);
3796 data
+= (count
- rem
);
3802 EXPORT_SYMBOL(hci_recv_fragment
);
3804 #define STREAM_REASSEMBLY 0
3806 int hci_recv_stream_fragment(struct hci_dev
*hdev
, void *data
, int count
)
3812 struct sk_buff
*skb
= hdev
->reassembly
[STREAM_REASSEMBLY
];
3815 struct { char type
; } *pkt
;
3817 /* Start of the frame */
3824 type
= bt_cb(skb
)->pkt_type
;
3826 rem
= hci_reassembly(hdev
, type
, data
, count
,
3831 data
+= (count
- rem
);
3837 EXPORT_SYMBOL(hci_recv_stream_fragment
);
3839 /* ---- Interface to upper protocols ---- */
3841 int hci_register_cb(struct hci_cb
*cb
)
3843 BT_DBG("%p name %s", cb
, cb
->name
);
3845 write_lock(&hci_cb_list_lock
);
3846 list_add(&cb
->list
, &hci_cb_list
);
3847 write_unlock(&hci_cb_list_lock
);
3851 EXPORT_SYMBOL(hci_register_cb
);
3853 int hci_unregister_cb(struct hci_cb
*cb
)
3855 BT_DBG("%p name %s", cb
, cb
->name
);
3857 write_lock(&hci_cb_list_lock
);
3858 list_del(&cb
->list
);
3859 write_unlock(&hci_cb_list_lock
);
3863 EXPORT_SYMBOL(hci_unregister_cb
);
3865 static void hci_send_frame(struct hci_dev
*hdev
, struct sk_buff
*skb
)
3867 BT_DBG("%s type %d len %d", hdev
->name
, bt_cb(skb
)->pkt_type
, skb
->len
);
3870 __net_timestamp(skb
);
3872 /* Send copy to monitor */
3873 hci_send_to_monitor(hdev
, skb
);
3875 if (atomic_read(&hdev
->promisc
)) {
3876 /* Send copy to the sockets */
3877 hci_send_to_sock(hdev
, skb
);
3880 /* Get rid of skb owner, prior to sending to the driver. */
3883 if (hdev
->send(hdev
, skb
) < 0)
3884 BT_ERR("%s sending frame failed", hdev
->name
);
3887 void hci_req_init(struct hci_request
*req
, struct hci_dev
*hdev
)
3889 skb_queue_head_init(&req
->cmd_q
);
3894 int hci_req_run(struct hci_request
*req
, hci_req_complete_t complete
)
3896 struct hci_dev
*hdev
= req
->hdev
;
3897 struct sk_buff
*skb
;
3898 unsigned long flags
;
3900 BT_DBG("length %u", skb_queue_len(&req
->cmd_q
));
3902 /* If an error occured during request building, remove all HCI
3903 * commands queued on the HCI request queue.
3906 skb_queue_purge(&req
->cmd_q
);
3910 /* Do not allow empty requests */
3911 if (skb_queue_empty(&req
->cmd_q
))
3914 skb
= skb_peek_tail(&req
->cmd_q
);
3915 bt_cb(skb
)->req
.complete
= complete
;
3917 spin_lock_irqsave(&hdev
->cmd_q
.lock
, flags
);
3918 skb_queue_splice_tail(&req
->cmd_q
, &hdev
->cmd_q
);
3919 spin_unlock_irqrestore(&hdev
->cmd_q
.lock
, flags
);
3921 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3926 static struct sk_buff
*hci_prepare_cmd(struct hci_dev
*hdev
, u16 opcode
,
3927 u32 plen
, const void *param
)
3929 int len
= HCI_COMMAND_HDR_SIZE
+ plen
;
3930 struct hci_command_hdr
*hdr
;
3931 struct sk_buff
*skb
;
3933 skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
3937 hdr
= (struct hci_command_hdr
*) skb_put(skb
, HCI_COMMAND_HDR_SIZE
);
3938 hdr
->opcode
= cpu_to_le16(opcode
);
3942 memcpy(skb_put(skb
, plen
), param
, plen
);
3944 BT_DBG("skb len %d", skb
->len
);
3946 bt_cb(skb
)->pkt_type
= HCI_COMMAND_PKT
;
3951 /* Send HCI command */
3952 int hci_send_cmd(struct hci_dev
*hdev
, __u16 opcode
, __u32 plen
,
3955 struct sk_buff
*skb
;
3957 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3959 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3961 BT_ERR("%s no memory for command", hdev
->name
);
3965 /* Stand-alone HCI commands must be flaged as
3966 * single-command requests.
3968 bt_cb(skb
)->req
.start
= true;
3970 skb_queue_tail(&hdev
->cmd_q
, skb
);
3971 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
3976 /* Queue a command to an asynchronous HCI request */
3977 void hci_req_add_ev(struct hci_request
*req
, u16 opcode
, u32 plen
,
3978 const void *param
, u8 event
)
3980 struct hci_dev
*hdev
= req
->hdev
;
3981 struct sk_buff
*skb
;
3983 BT_DBG("%s opcode 0x%4.4x plen %d", hdev
->name
, opcode
, plen
);
3985 /* If an error occured during request building, there is no point in
3986 * queueing the HCI command. We can simply return.
3991 skb
= hci_prepare_cmd(hdev
, opcode
, plen
, param
);
3993 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3994 hdev
->name
, opcode
);
3999 if (skb_queue_empty(&req
->cmd_q
))
4000 bt_cb(skb
)->req
.start
= true;
4002 bt_cb(skb
)->req
.event
= event
;
4004 skb_queue_tail(&req
->cmd_q
, skb
);
4007 void hci_req_add(struct hci_request
*req
, u16 opcode
, u32 plen
,
4010 hci_req_add_ev(req
, opcode
, plen
, param
, 0);
4013 /* Get data from the previously sent command */
4014 void *hci_sent_cmd_data(struct hci_dev
*hdev
, __u16 opcode
)
4016 struct hci_command_hdr
*hdr
;
4018 if (!hdev
->sent_cmd
)
4021 hdr
= (void *) hdev
->sent_cmd
->data
;
4023 if (hdr
->opcode
!= cpu_to_le16(opcode
))
4026 BT_DBG("%s opcode 0x%4.4x", hdev
->name
, opcode
);
4028 return hdev
->sent_cmd
->data
+ HCI_COMMAND_HDR_SIZE
;
4032 static void hci_add_acl_hdr(struct sk_buff
*skb
, __u16 handle
, __u16 flags
)
4034 struct hci_acl_hdr
*hdr
;
4037 skb_push(skb
, HCI_ACL_HDR_SIZE
);
4038 skb_reset_transport_header(skb
);
4039 hdr
= (struct hci_acl_hdr
*)skb_transport_header(skb
);
4040 hdr
->handle
= cpu_to_le16(hci_handle_pack(handle
, flags
));
4041 hdr
->dlen
= cpu_to_le16(len
);
4044 static void hci_queue_acl(struct hci_chan
*chan
, struct sk_buff_head
*queue
,
4045 struct sk_buff
*skb
, __u16 flags
)
4047 struct hci_conn
*conn
= chan
->conn
;
4048 struct hci_dev
*hdev
= conn
->hdev
;
4049 struct sk_buff
*list
;
4051 skb
->len
= skb_headlen(skb
);
4054 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
4056 switch (hdev
->dev_type
) {
4058 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
4061 hci_add_acl_hdr(skb
, chan
->handle
, flags
);
4064 BT_ERR("%s unknown dev_type %d", hdev
->name
, hdev
->dev_type
);
4068 list
= skb_shinfo(skb
)->frag_list
;
4070 /* Non fragmented */
4071 BT_DBG("%s nonfrag skb %p len %d", hdev
->name
, skb
, skb
->len
);
4073 skb_queue_tail(queue
, skb
);
4076 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
4078 skb_shinfo(skb
)->frag_list
= NULL
;
4080 /* Queue all fragments atomically */
4081 spin_lock(&queue
->lock
);
4083 __skb_queue_tail(queue
, skb
);
4085 flags
&= ~ACL_START
;
4088 skb
= list
; list
= list
->next
;
4090 bt_cb(skb
)->pkt_type
= HCI_ACLDATA_PKT
;
4091 hci_add_acl_hdr(skb
, conn
->handle
, flags
);
4093 BT_DBG("%s frag %p len %d", hdev
->name
, skb
, skb
->len
);
4095 __skb_queue_tail(queue
, skb
);
4098 spin_unlock(&queue
->lock
);
4102 void hci_send_acl(struct hci_chan
*chan
, struct sk_buff
*skb
, __u16 flags
)
4104 struct hci_dev
*hdev
= chan
->conn
->hdev
;
4106 BT_DBG("%s chan %p flags 0x%4.4x", hdev
->name
, chan
, flags
);
4108 hci_queue_acl(chan
, &chan
->data_q
, skb
, flags
);
4110 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
4114 void hci_send_sco(struct hci_conn
*conn
, struct sk_buff
*skb
)
4116 struct hci_dev
*hdev
= conn
->hdev
;
4117 struct hci_sco_hdr hdr
;
4119 BT_DBG("%s len %d", hdev
->name
, skb
->len
);
4121 hdr
.handle
= cpu_to_le16(conn
->handle
);
4122 hdr
.dlen
= skb
->len
;
4124 skb_push(skb
, HCI_SCO_HDR_SIZE
);
4125 skb_reset_transport_header(skb
);
4126 memcpy(skb_transport_header(skb
), &hdr
, HCI_SCO_HDR_SIZE
);
4128 bt_cb(skb
)->pkt_type
= HCI_SCODATA_PKT
;
4130 skb_queue_tail(&conn
->data_q
, skb
);
4131 queue_work(hdev
->workqueue
, &hdev
->tx_work
);
4134 /* ---- HCI TX task (outgoing data) ---- */
4136 /* HCI Connection scheduler */
4137 static struct hci_conn
*hci_low_sent(struct hci_dev
*hdev
, __u8 type
,
4140 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4141 struct hci_conn
*conn
= NULL
, *c
;
4142 unsigned int num
= 0, min
= ~0;
4144 /* We don't have to lock device here. Connections are always
4145 * added and removed with TX task disabled. */
4149 list_for_each_entry_rcu(c
, &h
->list
, list
) {
4150 if (c
->type
!= type
|| skb_queue_empty(&c
->data_q
))
4153 if (c
->state
!= BT_CONNECTED
&& c
->state
!= BT_CONFIG
)
4158 if (c
->sent
< min
) {
4163 if (hci_conn_num(hdev
, type
) == num
)
4172 switch (conn
->type
) {
4174 cnt
= hdev
->acl_cnt
;
4178 cnt
= hdev
->sco_cnt
;
4181 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
4185 BT_ERR("Unknown link type");
4193 BT_DBG("conn %p quote %d", conn
, *quote
);
4197 static void hci_link_tx_to(struct hci_dev
*hdev
, __u8 type
)
4199 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4202 BT_ERR("%s link tx timeout", hdev
->name
);
4206 /* Kill stalled connections */
4207 list_for_each_entry_rcu(c
, &h
->list
, list
) {
4208 if (c
->type
== type
&& c
->sent
) {
4209 BT_ERR("%s killing stalled connection %pMR",
4210 hdev
->name
, &c
->dst
);
4211 hci_disconnect(c
, HCI_ERROR_REMOTE_USER_TERM
);
4218 static struct hci_chan
*hci_chan_sent(struct hci_dev
*hdev
, __u8 type
,
4221 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4222 struct hci_chan
*chan
= NULL
;
4223 unsigned int num
= 0, min
= ~0, cur_prio
= 0;
4224 struct hci_conn
*conn
;
4225 int cnt
, q
, conn_num
= 0;
4227 BT_DBG("%s", hdev
->name
);
4231 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
4232 struct hci_chan
*tmp
;
4234 if (conn
->type
!= type
)
4237 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
4242 list_for_each_entry_rcu(tmp
, &conn
->chan_list
, list
) {
4243 struct sk_buff
*skb
;
4245 if (skb_queue_empty(&tmp
->data_q
))
4248 skb
= skb_peek(&tmp
->data_q
);
4249 if (skb
->priority
< cur_prio
)
4252 if (skb
->priority
> cur_prio
) {
4255 cur_prio
= skb
->priority
;
4260 if (conn
->sent
< min
) {
4266 if (hci_conn_num(hdev
, type
) == conn_num
)
4275 switch (chan
->conn
->type
) {
4277 cnt
= hdev
->acl_cnt
;
4280 cnt
= hdev
->block_cnt
;
4284 cnt
= hdev
->sco_cnt
;
4287 cnt
= hdev
->le_mtu
? hdev
->le_cnt
: hdev
->acl_cnt
;
4291 BT_ERR("Unknown link type");
4296 BT_DBG("chan %p quote %d", chan
, *quote
);
4300 static void hci_prio_recalculate(struct hci_dev
*hdev
, __u8 type
)
4302 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
4303 struct hci_conn
*conn
;
4306 BT_DBG("%s", hdev
->name
);
4310 list_for_each_entry_rcu(conn
, &h
->list
, list
) {
4311 struct hci_chan
*chan
;
4313 if (conn
->type
!= type
)
4316 if (conn
->state
!= BT_CONNECTED
&& conn
->state
!= BT_CONFIG
)
4321 list_for_each_entry_rcu(chan
, &conn
->chan_list
, list
) {
4322 struct sk_buff
*skb
;
4329 if (skb_queue_empty(&chan
->data_q
))
4332 skb
= skb_peek(&chan
->data_q
);
4333 if (skb
->priority
>= HCI_PRIO_MAX
- 1)
4336 skb
->priority
= HCI_PRIO_MAX
- 1;
4338 BT_DBG("chan %p skb %p promoted to %d", chan
, skb
,
4342 if (hci_conn_num(hdev
, type
) == num
)
4350 static inline int __get_blocks(struct hci_dev
*hdev
, struct sk_buff
*skb
)
4352 /* Calculate count of blocks used by this packet */
4353 return DIV_ROUND_UP(skb
->len
- HCI_ACL_HDR_SIZE
, hdev
->block_len
);
4356 static void __check_timeout(struct hci_dev
*hdev
, unsigned int cnt
)
4358 if (!test_bit(HCI_RAW
, &hdev
->flags
)) {
4359 /* ACL tx timeout must be longer than maximum
4360 * link supervision timeout (40.9 seconds) */
4361 if (!cnt
&& time_after(jiffies
, hdev
->acl_last_tx
+
4362 HCI_ACL_TX_TIMEOUT
))
4363 hci_link_tx_to(hdev
, ACL_LINK
);
4367 static void hci_sched_acl_pkt(struct hci_dev
*hdev
)
4369 unsigned int cnt
= hdev
->acl_cnt
;
4370 struct hci_chan
*chan
;
4371 struct sk_buff
*skb
;
4374 __check_timeout(hdev
, cnt
);
4376 while (hdev
->acl_cnt
&&
4377 (chan
= hci_chan_sent(hdev
, ACL_LINK
, "e
))) {
4378 u32 priority
= (skb_peek(&chan
->data_q
))->priority
;
4379 while (quote
-- && (skb
= skb_peek(&chan
->data_q
))) {
4380 BT_DBG("chan %p skb %p len %d priority %u", chan
, skb
,
4381 skb
->len
, skb
->priority
);
4383 /* Stop if priority has changed */
4384 if (skb
->priority
< priority
)
4387 skb
= skb_dequeue(&chan
->data_q
);
4389 hci_conn_enter_active_mode(chan
->conn
,
4390 bt_cb(skb
)->force_active
);
4392 hci_send_frame(hdev
, skb
);
4393 hdev
->acl_last_tx
= jiffies
;
4401 if (cnt
!= hdev
->acl_cnt
)
4402 hci_prio_recalculate(hdev
, ACL_LINK
);
/* Block-based ACL scheduler (HCI flow control mode 0x01): controller
 * buffer usage is accounted in blocks (hdev->block_cnt) instead of
 * whole packets, so each skb may consume several credits.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* snapshot to detect progress */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP_LINK traffic; everything else ACL. */
	type = ACL_LINK;
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Packet does not fit in the remaining blocks.
			 * NOTE(review): the dequeued skb is neither sent
			 * nor re-queued here — looks like it is lost;
			 * confirm against upstream intent. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the whole packet against both the
			 * controller credits and the channel budget. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4459 static void hci_sched_acl(struct hci_dev
*hdev
)
4461 BT_DBG("%s", hdev
->name
);
4463 /* No ACL link over BR/EDR controller */
4464 if (!hci_conn_num(hdev
, ACL_LINK
) && hdev
->dev_type
== HCI_BREDR
)
4467 /* No AMP link over AMP controller */
4468 if (!hci_conn_num(hdev
, AMP_LINK
) && hdev
->dev_type
== HCI_AMP
)
4471 switch (hdev
->flow_ctl_mode
) {
4472 case HCI_FLOW_CTL_MODE_PACKET_BASED
:
4473 hci_sched_acl_pkt(hdev
);
4476 case HCI_FLOW_CTL_MODE_BLOCK_BASED
:
4477 hci_sched_acl_blk(hdev
);
4483 static void hci_sched_sco(struct hci_dev
*hdev
)
4485 struct hci_conn
*conn
;
4486 struct sk_buff
*skb
;
4489 BT_DBG("%s", hdev
->name
);
4491 if (!hci_conn_num(hdev
, SCO_LINK
))
4494 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, SCO_LINK
, "e
))) {
4495 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4496 BT_DBG("skb %p len %d", skb
, skb
->len
);
4497 hci_send_frame(hdev
, skb
);
4500 if (conn
->sent
== ~0)
4506 static void hci_sched_esco(struct hci_dev
*hdev
)
4508 struct hci_conn
*conn
;
4509 struct sk_buff
*skb
;
4512 BT_DBG("%s", hdev
->name
);
4514 if (!hci_conn_num(hdev
, ESCO_LINK
))
4517 while (hdev
->sco_cnt
&& (conn
= hci_low_sent(hdev
, ESCO_LINK
,
4519 while (quote
-- && (skb
= skb_dequeue(&conn
->data_q
))) {
4520 BT_DBG("skb %p len %d", skb
, skb
->len
);
4521 hci_send_frame(hdev
, skb
);
4524 if (conn
->sent
== ~0)
/* LE scheduler: like the packet-based ACL path, but LE may either
 * have its own buffer pool (le_pkts/le_cnt) or share the ACL pool
 * (acl_cnt) when the controller reports no dedicated LE buffers.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if available, otherwise borrow ACL's. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: let promoted skb priorities decay again. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
/* TX work item: run all per-link-type schedulers, then flush any raw
 * (unknown type) packets queued by user space straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* With a user channel open, user space owns the device and the
	 * kernel schedulers must stay out of the way. */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4602 /* ----- HCI RX task (incoming data processing) ----- */
4604 /* ACL data packet */
/* ACL data packet */
/* Strip the ACL header, look up the owning connection by handle and
 * hand the payload to L2CAP. Unknown handles are logged and dropped.
 * Consumes the skb in the error path; L2CAP owns it otherwise.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle field packs the 12-bit handle plus PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4640 /* SCO data packet */
/* SCO data packet */
/* Strip the SCO header, look up the owning connection by handle and
 * hand the payload to the SCO layer. Unknown handles are logged and
 * dropped. Consumes the skb in the error path.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4671 static bool hci_req_is_complete(struct hci_dev
*hdev
)
4673 struct sk_buff
*skb
;
4675 skb
= skb_peek(&hdev
->cmd_q
);
4679 return bt_cb(skb
)->req
.start
;
4682 static void hci_resend_last(struct hci_dev
*hdev
)
4684 struct hci_command_hdr
*sent
;
4685 struct sk_buff
*skb
;
4688 if (!hdev
->sent_cmd
)
4691 sent
= (void *) hdev
->sent_cmd
->data
;
4692 opcode
= __le16_to_cpu(sent
->opcode
);
4693 if (opcode
== HCI_OP_RESET
)
4696 skb
= skb_clone(hdev
->sent_cmd
, GFP_KERNEL
);
4700 skb_queue_head(&hdev
->cmd_q
, skb
);
4701 queue_work(hdev
->workqueue
, &hdev
->cmd_work
);
/* Called on Command Complete/Status for 'opcode' with 'status'. If
 * this finishes the current request (or it failed), find and invoke
 * the request's completion callback and discard any commands still
 * queued for that request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The first command of the next request marks the end
		 * of this one: put it back and stop. */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
/* RX work item: drain hdev->rx_q, mirroring each frame to the
 * monitor (and promiscuous sockets), then dispatch by packet type to
 * the event/ACL/SCO handlers. Frames are dropped while the device is
 * raw, claimed by a user channel, or (data packets only) still in
 * init.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw or user-channel devices: user space consumes the
		 * traffic; the kernel stack must not process it. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
/* Command work item: if the controller has a free command slot
 * (cmd_cnt), dequeue the next command, keep a clone in sent_cmd for
 * completion matching, send it and arm the command timeout. On clone
 * failure the command is re-queued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd clone, if any. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the controller won't answer
			 * further commands, so stop the timer instead
			 * of re-arming it. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: retry the same command later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}