/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
40 #include <linux/uaccess.h>
41 #include <asm/unaligned.h>
43 #include <net/bluetooth/bluetooth.h>
44 #include <net/bluetooth/hci_core.h>
46 static void hci_le_connect(struct hci_conn
*conn
)
48 struct hci_dev
*hdev
= conn
->hdev
;
49 struct hci_cp_le_create_conn cp
;
51 conn
->state
= BT_CONNECT
;
53 conn
->link_mode
|= HCI_LM_MASTER
;
54 conn
->sec_level
= BT_SECURITY_LOW
;
56 memset(&cp
, 0, sizeof(cp
));
57 cp
.scan_interval
= cpu_to_le16(0x0060);
58 cp
.scan_window
= cpu_to_le16(0x0030);
59 bacpy(&cp
.peer_addr
, &conn
->dst
);
60 cp
.peer_addr_type
= conn
->dst_type
;
61 cp
.conn_interval_min
= cpu_to_le16(0x0028);
62 cp
.conn_interval_max
= cpu_to_le16(0x0038);
63 cp
.supervision_timeout
= cpu_to_le16(0x002a);
64 cp
.min_ce_len
= cpu_to_le16(0x0000);
65 cp
.max_ce_len
= cpu_to_le16(0x0000);
67 hci_send_cmd(hdev
, HCI_OP_LE_CREATE_CONN
, sizeof(cp
), &cp
);
70 static void hci_le_connect_cancel(struct hci_conn
*conn
)
72 hci_send_cmd(conn
->hdev
, HCI_OP_LE_CREATE_CONN_CANCEL
, 0, NULL
);
75 void hci_acl_connect(struct hci_conn
*conn
)
77 struct hci_dev
*hdev
= conn
->hdev
;
78 struct inquiry_entry
*ie
;
79 struct hci_cp_create_conn cp
;
81 BT_DBG("hcon %p", conn
);
83 conn
->state
= BT_CONNECT
;
86 conn
->link_mode
= HCI_LM_MASTER
;
90 conn
->link_policy
= hdev
->link_policy
;
92 memset(&cp
, 0, sizeof(cp
));
93 bacpy(&cp
.bdaddr
, &conn
->dst
);
94 cp
.pscan_rep_mode
= 0x02;
96 ie
= hci_inquiry_cache_lookup(hdev
, &conn
->dst
);
98 if (inquiry_entry_age(ie
) <= INQUIRY_ENTRY_AGE_MAX
) {
99 cp
.pscan_rep_mode
= ie
->data
.pscan_rep_mode
;
100 cp
.pscan_mode
= ie
->data
.pscan_mode
;
101 cp
.clock_offset
= ie
->data
.clock_offset
|
105 memcpy(conn
->dev_class
, ie
->data
.dev_class
, 3);
106 if (ie
->data
.ssp_mode
> 0)
107 set_bit(HCI_CONN_SSP_ENABLED
, &conn
->flags
);
110 cp
.pkt_type
= cpu_to_le16(conn
->pkt_type
);
111 if (lmp_rswitch_capable(hdev
) && !(hdev
->link_mode
& HCI_LM_MASTER
))
112 cp
.role_switch
= 0x01;
114 cp
.role_switch
= 0x00;
116 hci_send_cmd(hdev
, HCI_OP_CREATE_CONN
, sizeof(cp
), &cp
);
119 static void hci_acl_connect_cancel(struct hci_conn
*conn
)
121 struct hci_cp_create_conn_cancel cp
;
125 if (conn
->hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
128 bacpy(&cp
.bdaddr
, &conn
->dst
);
129 hci_send_cmd(conn
->hdev
, HCI_OP_CREATE_CONN_CANCEL
, sizeof(cp
), &cp
);
132 void hci_acl_disconn(struct hci_conn
*conn
, __u8 reason
)
134 struct hci_cp_disconnect cp
;
138 conn
->state
= BT_DISCONN
;
140 cp
.handle
= cpu_to_le16(conn
->handle
);
142 hci_send_cmd(conn
->hdev
, HCI_OP_DISCONNECT
, sizeof(cp
), &cp
);
145 void hci_add_sco(struct hci_conn
*conn
, __u16 handle
)
147 struct hci_dev
*hdev
= conn
->hdev
;
148 struct hci_cp_add_sco cp
;
152 conn
->state
= BT_CONNECT
;
157 cp
.handle
= cpu_to_le16(handle
);
158 cp
.pkt_type
= cpu_to_le16(conn
->pkt_type
);
160 hci_send_cmd(hdev
, HCI_OP_ADD_SCO
, sizeof(cp
), &cp
);
163 void hci_setup_sync(struct hci_conn
*conn
, __u16 handle
)
165 struct hci_dev
*hdev
= conn
->hdev
;
166 struct hci_cp_setup_sync_conn cp
;
170 conn
->state
= BT_CONNECT
;
175 cp
.handle
= cpu_to_le16(handle
);
176 cp
.pkt_type
= cpu_to_le16(conn
->pkt_type
);
178 cp
.tx_bandwidth
= cpu_to_le32(0x00001f40);
179 cp
.rx_bandwidth
= cpu_to_le32(0x00001f40);
180 cp
.max_latency
= cpu_to_le16(0xffff);
181 cp
.voice_setting
= cpu_to_le16(hdev
->voice_setting
);
182 cp
.retrans_effort
= 0xff;
184 hci_send_cmd(hdev
, HCI_OP_SETUP_SYNC_CONN
, sizeof(cp
), &cp
);
187 void hci_le_conn_update(struct hci_conn
*conn
, u16 min
, u16 max
,
188 u16 latency
, u16 to_multiplier
)
190 struct hci_cp_le_conn_update cp
;
191 struct hci_dev
*hdev
= conn
->hdev
;
193 memset(&cp
, 0, sizeof(cp
));
195 cp
.handle
= cpu_to_le16(conn
->handle
);
196 cp
.conn_interval_min
= cpu_to_le16(min
);
197 cp
.conn_interval_max
= cpu_to_le16(max
);
198 cp
.conn_latency
= cpu_to_le16(latency
);
199 cp
.supervision_timeout
= cpu_to_le16(to_multiplier
);
200 cp
.min_ce_len
= cpu_to_le16(0x0001);
201 cp
.max_ce_len
= cpu_to_le16(0x0001);
203 hci_send_cmd(hdev
, HCI_OP_LE_CONN_UPDATE
, sizeof(cp
), &cp
);
205 EXPORT_SYMBOL(hci_le_conn_update
);
207 void hci_le_start_enc(struct hci_conn
*conn
, __le16 ediv
, __u8 rand
[8],
210 struct hci_dev
*hdev
= conn
->hdev
;
211 struct hci_cp_le_start_enc cp
;
215 memset(&cp
, 0, sizeof(cp
));
217 cp
.handle
= cpu_to_le16(conn
->handle
);
218 memcpy(cp
.ltk
, ltk
, sizeof(cp
.ltk
));
220 memcpy(cp
.rand
, rand
, sizeof(cp
.rand
));
222 hci_send_cmd(hdev
, HCI_OP_LE_START_ENC
, sizeof(cp
), &cp
);
224 EXPORT_SYMBOL(hci_le_start_enc
);
226 void hci_le_ltk_neg_reply(struct hci_conn
*conn
)
228 struct hci_dev
*hdev
= conn
->hdev
;
229 struct hci_cp_le_ltk_neg_reply cp
;
233 memset(&cp
, 0, sizeof(cp
));
235 cp
.handle
= cpu_to_le16(conn
->handle
);
237 hci_send_cmd(hdev
, HCI_OP_LE_LTK_NEG_REPLY
, sizeof(cp
), &cp
);
240 /* Device _must_ be locked */
241 void hci_sco_setup(struct hci_conn
*conn
, __u8 status
)
243 struct hci_conn
*sco
= conn
->link
;
251 if (lmp_esco_capable(conn
->hdev
))
252 hci_setup_sync(sco
, conn
->handle
);
254 hci_add_sco(sco
, conn
->handle
);
256 hci_proto_connect_cfm(sco
, status
);
261 static void hci_conn_timeout(struct work_struct
*work
)
263 struct hci_conn
*conn
= container_of(work
, struct hci_conn
,
267 BT_DBG("conn %p state %s", conn
, state_to_string(conn
->state
));
269 if (atomic_read(&conn
->refcnt
))
272 switch (conn
->state
) {
276 if (conn
->type
== ACL_LINK
)
277 hci_acl_connect_cancel(conn
);
278 else if (conn
->type
== LE_LINK
)
279 hci_le_connect_cancel(conn
);
284 reason
= hci_proto_disconn_ind(conn
);
285 hci_acl_disconn(conn
, reason
);
288 conn
->state
= BT_CLOSED
;
293 /* Enter sniff mode */
294 static void hci_conn_enter_sniff_mode(struct hci_conn
*conn
)
296 struct hci_dev
*hdev
= conn
->hdev
;
298 BT_DBG("conn %p mode %d", conn
, conn
->mode
);
300 if (test_bit(HCI_RAW
, &hdev
->flags
))
303 if (!lmp_sniff_capable(hdev
) || !lmp_sniff_capable(conn
))
306 if (conn
->mode
!= HCI_CM_ACTIVE
|| !(conn
->link_policy
& HCI_LP_SNIFF
))
309 if (lmp_sniffsubr_capable(hdev
) && lmp_sniffsubr_capable(conn
)) {
310 struct hci_cp_sniff_subrate cp
;
311 cp
.handle
= cpu_to_le16(conn
->handle
);
312 cp
.max_latency
= cpu_to_le16(0);
313 cp
.min_remote_timeout
= cpu_to_le16(0);
314 cp
.min_local_timeout
= cpu_to_le16(0);
315 hci_send_cmd(hdev
, HCI_OP_SNIFF_SUBRATE
, sizeof(cp
), &cp
);
318 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND
, &conn
->flags
)) {
319 struct hci_cp_sniff_mode cp
;
320 cp
.handle
= cpu_to_le16(conn
->handle
);
321 cp
.max_interval
= cpu_to_le16(hdev
->sniff_max_interval
);
322 cp
.min_interval
= cpu_to_le16(hdev
->sniff_min_interval
);
323 cp
.attempt
= cpu_to_le16(4);
324 cp
.timeout
= cpu_to_le16(1);
325 hci_send_cmd(hdev
, HCI_OP_SNIFF_MODE
, sizeof(cp
), &cp
);
329 static void hci_conn_idle(unsigned long arg
)
331 struct hci_conn
*conn
= (void *) arg
;
333 BT_DBG("conn %p mode %d", conn
, conn
->mode
);
335 hci_conn_enter_sniff_mode(conn
);
338 static void hci_conn_auto_accept(unsigned long arg
)
340 struct hci_conn
*conn
= (void *) arg
;
341 struct hci_dev
*hdev
= conn
->hdev
;
343 hci_send_cmd(hdev
, HCI_OP_USER_CONFIRM_REPLY
, sizeof(conn
->dst
),
347 struct hci_conn
*hci_conn_add(struct hci_dev
*hdev
, int type
, bdaddr_t
*dst
)
349 struct hci_conn
*conn
;
351 BT_DBG("%s dst %s", hdev
->name
, batostr(dst
));
353 conn
= kzalloc(sizeof(struct hci_conn
), GFP_KERNEL
);
357 bacpy(&conn
->dst
, dst
);
360 conn
->mode
= HCI_CM_ACTIVE
;
361 conn
->state
= BT_OPEN
;
362 conn
->auth_type
= HCI_AT_GENERAL_BONDING
;
363 conn
->io_capability
= hdev
->io_capability
;
364 conn
->remote_auth
= 0xff;
365 conn
->key_type
= 0xff;
367 set_bit(HCI_CONN_POWER_SAVE
, &conn
->flags
);
368 conn
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
372 conn
->pkt_type
= hdev
->pkt_type
& ACL_PTYPE_MASK
;
375 if (lmp_esco_capable(hdev
))
376 conn
->pkt_type
= (hdev
->esco_type
& SCO_ESCO_MASK
) |
377 (hdev
->esco_type
& EDR_ESCO_MASK
);
379 conn
->pkt_type
= hdev
->pkt_type
& SCO_PTYPE_MASK
;
382 conn
->pkt_type
= hdev
->esco_type
& ~EDR_ESCO_MASK
;
386 skb_queue_head_init(&conn
->data_q
);
388 INIT_LIST_HEAD(&conn
->chan_list
);
390 INIT_DELAYED_WORK(&conn
->disc_work
, hci_conn_timeout
);
391 setup_timer(&conn
->idle_timer
, hci_conn_idle
, (unsigned long)conn
);
392 setup_timer(&conn
->auto_accept_timer
, hci_conn_auto_accept
,
393 (unsigned long) conn
);
395 atomic_set(&conn
->refcnt
, 0);
399 hci_conn_hash_add(hdev
, conn
);
401 hdev
->notify(hdev
, HCI_NOTIFY_CONN_ADD
);
403 atomic_set(&conn
->devref
, 0);
405 hci_conn_init_sysfs(conn
);
410 int hci_conn_del(struct hci_conn
*conn
)
412 struct hci_dev
*hdev
= conn
->hdev
;
414 BT_DBG("%s conn %p handle %d", hdev
->name
, conn
, conn
->handle
);
416 del_timer(&conn
->idle_timer
);
418 cancel_delayed_work_sync(&conn
->disc_work
);
420 del_timer(&conn
->auto_accept_timer
);
422 if (conn
->type
== ACL_LINK
) {
423 struct hci_conn
*sco
= conn
->link
;
428 hdev
->acl_cnt
+= conn
->sent
;
429 } else if (conn
->type
== LE_LINK
) {
431 hdev
->le_cnt
+= conn
->sent
;
433 hdev
->acl_cnt
+= conn
->sent
;
435 struct hci_conn
*acl
= conn
->link
;
443 hci_chan_list_flush(conn
);
445 hci_conn_hash_del(hdev
, conn
);
447 hdev
->notify(hdev
, HCI_NOTIFY_CONN_DEL
);
449 skb_queue_purge(&conn
->data_q
);
451 hci_conn_put_device(conn
);
455 if (conn
->handle
== 0)
461 struct hci_dev
*hci_get_route(bdaddr_t
*dst
, bdaddr_t
*src
)
463 int use_src
= bacmp(src
, BDADDR_ANY
);
464 struct hci_dev
*hdev
= NULL
, *d
;
466 BT_DBG("%s -> %s", batostr(src
), batostr(dst
));
468 read_lock(&hci_dev_list_lock
);
470 list_for_each_entry(d
, &hci_dev_list
, list
) {
471 if (!test_bit(HCI_UP
, &d
->flags
) || test_bit(HCI_RAW
, &d
->flags
))
475 * No source address - find interface with bdaddr != dst
476 * Source address - find interface with bdaddr == src
480 if (!bacmp(&d
->bdaddr
, src
)) {
484 if (bacmp(&d
->bdaddr
, dst
)) {
491 hdev
= hci_dev_hold(hdev
);
493 read_unlock(&hci_dev_list_lock
);
496 EXPORT_SYMBOL(hci_get_route
);
498 /* Create SCO, ACL or LE connection.
499 * Device _must_ be locked */
500 struct hci_conn
*hci_connect(struct hci_dev
*hdev
, int type
, bdaddr_t
*dst
,
501 __u8 dst_type
, __u8 sec_level
, __u8 auth_type
)
503 struct hci_conn
*acl
;
504 struct hci_conn
*sco
;
507 BT_DBG("%s dst %s", hdev
->name
, batostr(dst
));
509 if (type
== LE_LINK
) {
510 le
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, dst
);
512 le
= hci_conn_add(hdev
, LE_LINK
, dst
);
514 return ERR_PTR(-ENOMEM
);
516 le
->dst_type
= bdaddr_to_le(dst_type
);
520 le
->pending_sec_level
= sec_level
;
521 le
->auth_type
= auth_type
;
528 acl
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, dst
);
530 acl
= hci_conn_add(hdev
, ACL_LINK
, dst
);
532 return ERR_PTR(-ENOMEM
);
537 if (acl
->state
== BT_OPEN
|| acl
->state
== BT_CLOSED
) {
538 acl
->sec_level
= BT_SECURITY_LOW
;
539 acl
->pending_sec_level
= sec_level
;
540 acl
->auth_type
= auth_type
;
541 hci_acl_connect(acl
);
544 if (type
== ACL_LINK
)
547 sco
= hci_conn_hash_lookup_ba(hdev
, type
, dst
);
549 sco
= hci_conn_add(hdev
, type
, dst
);
552 return ERR_PTR(-ENOMEM
);
561 if (acl
->state
== BT_CONNECTED
&&
562 (sco
->state
== BT_OPEN
|| sco
->state
== BT_CLOSED
)) {
563 set_bit(HCI_CONN_POWER_SAVE
, &acl
->flags
);
564 hci_conn_enter_active_mode(acl
, BT_POWER_FORCE_ACTIVE_ON
);
566 if (test_bit(HCI_CONN_MODE_CHANGE_PEND
, &acl
->flags
)) {
567 /* defer SCO setup until mode change completed */
568 set_bit(HCI_CONN_SCO_SETUP_PEND
, &acl
->flags
);
572 hci_sco_setup(acl
, 0x00);
577 EXPORT_SYMBOL(hci_connect
);
579 /* Check link security requirement */
580 int hci_conn_check_link_mode(struct hci_conn
*conn
)
582 BT_DBG("conn %p", conn
);
584 if (hci_conn_ssp_enabled(conn
) && !(conn
->link_mode
& HCI_LM_ENCRYPT
))
589 EXPORT_SYMBOL(hci_conn_check_link_mode
);
591 /* Authenticate remote device */
592 static int hci_conn_auth(struct hci_conn
*conn
, __u8 sec_level
, __u8 auth_type
)
594 BT_DBG("conn %p", conn
);
596 if (conn
->pending_sec_level
> sec_level
)
597 sec_level
= conn
->pending_sec_level
;
599 if (sec_level
> conn
->sec_level
)
600 conn
->pending_sec_level
= sec_level
;
601 else if (conn
->link_mode
& HCI_LM_AUTH
)
604 /* Make sure we preserve an existing MITM requirement*/
605 auth_type
|= (conn
->auth_type
& 0x01);
607 conn
->auth_type
= auth_type
;
609 if (!test_and_set_bit(HCI_CONN_AUTH_PEND
, &conn
->flags
)) {
610 struct hci_cp_auth_requested cp
;
612 /* encrypt must be pending if auth is also pending */
613 set_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
);
615 cp
.handle
= cpu_to_le16(conn
->handle
);
616 hci_send_cmd(conn
->hdev
, HCI_OP_AUTH_REQUESTED
,
618 if (conn
->key_type
!= 0xff)
619 set_bit(HCI_CONN_REAUTH_PEND
, &conn
->flags
);
625 /* Encrypt the the link */
626 static void hci_conn_encrypt(struct hci_conn
*conn
)
628 BT_DBG("conn %p", conn
);
630 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
)) {
631 struct hci_cp_set_conn_encrypt cp
;
632 cp
.handle
= cpu_to_le16(conn
->handle
);
634 hci_send_cmd(conn
->hdev
, HCI_OP_SET_CONN_ENCRYPT
, sizeof(cp
),
639 /* Enable security */
640 int hci_conn_security(struct hci_conn
*conn
, __u8 sec_level
, __u8 auth_type
)
642 BT_DBG("conn %p", conn
);
644 /* For sdp we don't need the link key. */
645 if (sec_level
== BT_SECURITY_SDP
)
648 /* For non 2.1 devices and low security level we don't need the link
650 if (sec_level
== BT_SECURITY_LOW
&& !hci_conn_ssp_enabled(conn
))
653 /* For other security levels we need the link key. */
654 if (!(conn
->link_mode
& HCI_LM_AUTH
))
657 /* An authenticated combination key has sufficient security for any
659 if (conn
->key_type
== HCI_LK_AUTH_COMBINATION
)
662 /* An unauthenticated combination key has sufficient security for
663 security level 1 and 2. */
664 if (conn
->key_type
== HCI_LK_UNAUTH_COMBINATION
&&
665 (sec_level
== BT_SECURITY_MEDIUM
||
666 sec_level
== BT_SECURITY_LOW
))
669 /* A combination key has always sufficient security for the security
670 levels 1 or 2. High security level requires the combination key
671 is generated using maximum PIN code length (16).
672 For pre 2.1 units. */
673 if (conn
->key_type
== HCI_LK_COMBINATION
&&
674 (sec_level
!= BT_SECURITY_HIGH
||
675 conn
->pin_length
== 16))
679 if (test_bit(HCI_CONN_ENCRYPT_PEND
, &conn
->flags
))
682 if (!hci_conn_auth(conn
, sec_level
, auth_type
))
686 if (conn
->link_mode
& HCI_LM_ENCRYPT
)
689 hci_conn_encrypt(conn
);
692 EXPORT_SYMBOL(hci_conn_security
);
694 /* Check secure link requirement */
695 int hci_conn_check_secure(struct hci_conn
*conn
, __u8 sec_level
)
697 BT_DBG("conn %p", conn
);
699 if (sec_level
!= BT_SECURITY_HIGH
)
700 return 1; /* Accept if non-secure is required */
702 if (conn
->sec_level
== BT_SECURITY_HIGH
)
705 return 0; /* Reject not secure link */
707 EXPORT_SYMBOL(hci_conn_check_secure
);
709 /* Change link key */
710 int hci_conn_change_link_key(struct hci_conn
*conn
)
712 BT_DBG("conn %p", conn
);
714 if (!test_and_set_bit(HCI_CONN_AUTH_PEND
, &conn
->flags
)) {
715 struct hci_cp_change_conn_link_key cp
;
716 cp
.handle
= cpu_to_le16(conn
->handle
);
717 hci_send_cmd(conn
->hdev
, HCI_OP_CHANGE_CONN_LINK_KEY
,
723 EXPORT_SYMBOL(hci_conn_change_link_key
);
726 int hci_conn_switch_role(struct hci_conn
*conn
, __u8 role
)
728 BT_DBG("conn %p", conn
);
730 if (!role
&& conn
->link_mode
& HCI_LM_MASTER
)
733 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND
, &conn
->flags
)) {
734 struct hci_cp_switch_role cp
;
735 bacpy(&cp
.bdaddr
, &conn
->dst
);
737 hci_send_cmd(conn
->hdev
, HCI_OP_SWITCH_ROLE
, sizeof(cp
), &cp
);
742 EXPORT_SYMBOL(hci_conn_switch_role
);
744 /* Enter active mode */
745 void hci_conn_enter_active_mode(struct hci_conn
*conn
, __u8 force_active
)
747 struct hci_dev
*hdev
= conn
->hdev
;
749 BT_DBG("conn %p mode %d", conn
, conn
->mode
);
751 if (test_bit(HCI_RAW
, &hdev
->flags
))
754 if (conn
->mode
!= HCI_CM_SNIFF
)
757 if (!test_bit(HCI_CONN_POWER_SAVE
, &conn
->flags
) && !force_active
)
760 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND
, &conn
->flags
)) {
761 struct hci_cp_exit_sniff_mode cp
;
762 cp
.handle
= cpu_to_le16(conn
->handle
);
763 hci_send_cmd(hdev
, HCI_OP_EXIT_SNIFF_MODE
, sizeof(cp
), &cp
);
767 if (hdev
->idle_timeout
> 0)
768 mod_timer(&conn
->idle_timer
,
769 jiffies
+ msecs_to_jiffies(hdev
->idle_timeout
));
772 /* Drop all connection on the device */
773 void hci_conn_hash_flush(struct hci_dev
*hdev
)
775 struct hci_conn_hash
*h
= &hdev
->conn_hash
;
776 struct hci_conn
*c
, *n
;
778 BT_DBG("hdev %s", hdev
->name
);
780 list_for_each_entry_safe(c
, n
, &h
->list
, list
) {
781 c
->state
= BT_CLOSED
;
783 hci_proto_disconn_cfm(c
, HCI_ERROR_LOCAL_HOST_TERM
);
788 /* Check pending connect attempts */
789 void hci_conn_check_pending(struct hci_dev
*hdev
)
791 struct hci_conn
*conn
;
793 BT_DBG("hdev %s", hdev
->name
);
797 conn
= hci_conn_hash_lookup_state(hdev
, ACL_LINK
, BT_CONNECT2
);
799 hci_acl_connect(conn
);
801 hci_dev_unlock(hdev
);
804 void hci_conn_hold_device(struct hci_conn
*conn
)
806 atomic_inc(&conn
->devref
);
808 EXPORT_SYMBOL(hci_conn_hold_device
);
810 void hci_conn_put_device(struct hci_conn
*conn
)
812 if (atomic_dec_and_test(&conn
->devref
))
813 hci_conn_del_sysfs(conn
);
815 EXPORT_SYMBOL(hci_conn_put_device
);
817 int hci_get_conn_list(void __user
*arg
)
819 register struct hci_conn
*c
;
820 struct hci_conn_list_req req
, *cl
;
821 struct hci_conn_info
*ci
;
822 struct hci_dev
*hdev
;
823 int n
= 0, size
, err
;
825 if (copy_from_user(&req
, arg
, sizeof(req
)))
828 if (!req
.conn_num
|| req
.conn_num
> (PAGE_SIZE
* 2) / sizeof(*ci
))
831 size
= sizeof(req
) + req
.conn_num
* sizeof(*ci
);
833 cl
= kmalloc(size
, GFP_KERNEL
);
837 hdev
= hci_dev_get(req
.dev_id
);
846 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
847 bacpy(&(ci
+ n
)->bdaddr
, &c
->dst
);
848 (ci
+ n
)->handle
= c
->handle
;
849 (ci
+ n
)->type
= c
->type
;
850 (ci
+ n
)->out
= c
->out
;
851 (ci
+ n
)->state
= c
->state
;
852 (ci
+ n
)->link_mode
= c
->link_mode
;
853 if (++n
>= req
.conn_num
)
856 hci_dev_unlock(hdev
);
858 cl
->dev_id
= hdev
->id
;
860 size
= sizeof(req
) + n
* sizeof(*ci
);
864 err
= copy_to_user(arg
, cl
, size
);
867 return err
? -EFAULT
: 0;
870 int hci_get_conn_info(struct hci_dev
*hdev
, void __user
*arg
)
872 struct hci_conn_info_req req
;
873 struct hci_conn_info ci
;
874 struct hci_conn
*conn
;
875 char __user
*ptr
= arg
+ sizeof(req
);
877 if (copy_from_user(&req
, arg
, sizeof(req
)))
881 conn
= hci_conn_hash_lookup_ba(hdev
, req
.type
, &req
.bdaddr
);
883 bacpy(&ci
.bdaddr
, &conn
->dst
);
884 ci
.handle
= conn
->handle
;
885 ci
.type
= conn
->type
;
887 ci
.state
= conn
->state
;
888 ci
.link_mode
= conn
->link_mode
;
890 hci_dev_unlock(hdev
);
895 return copy_to_user(ptr
, &ci
, sizeof(ci
)) ? -EFAULT
: 0;
898 int hci_get_auth_info(struct hci_dev
*hdev
, void __user
*arg
)
900 struct hci_auth_info_req req
;
901 struct hci_conn
*conn
;
903 if (copy_from_user(&req
, arg
, sizeof(req
)))
907 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &req
.bdaddr
);
909 req
.type
= conn
->auth_type
;
910 hci_dev_unlock(hdev
);
915 return copy_to_user(arg
, &req
, sizeof(req
)) ? -EFAULT
: 0;
918 struct hci_chan
*hci_chan_create(struct hci_conn
*conn
)
920 struct hci_dev
*hdev
= conn
->hdev
;
921 struct hci_chan
*chan
;
923 BT_DBG("%s conn %p", hdev
->name
, conn
);
925 chan
= kzalloc(sizeof(struct hci_chan
), GFP_KERNEL
);
930 skb_queue_head_init(&chan
->data_q
);
932 list_add_rcu(&chan
->list
, &conn
->chan_list
);
937 int hci_chan_del(struct hci_chan
*chan
)
939 struct hci_conn
*conn
= chan
->conn
;
940 struct hci_dev
*hdev
= conn
->hdev
;
942 BT_DBG("%s conn %p chan %p", hdev
->name
, conn
, chan
);
944 list_del_rcu(&chan
->list
);
948 skb_queue_purge(&chan
->data_q
);
954 void hci_chan_list_flush(struct hci_conn
*conn
)
956 struct hci_chan
*chan
, *n
;
958 BT_DBG("conn %p", conn
);
960 list_for_each_entry_safe(chan
, n
, &conn
->chan_list
, list
)