[mirror_ubuntu-zesty-kernel.git] net/bluetooth/hci_conn.c
Bluetooth: Remove unused hci_le_ltk_reply()
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

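/* hci_le_connect() issues HCI_OP_LE_CREATE_CONN with fixed parameters.
 * Per the HCI specification, scan_interval/scan_window are in 0.625 ms
 * units and conn_interval_min/max in 1.25 ms units, so the values below
 * request a 60 ms/30 ms scan, a 50-70 ms connection interval and a
 * 420 ms supervision timeout (10 ms units).
 */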
static void hci_le_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = true;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;

        memset(&cp, 0, sizeof(cp));
        cp.scan_interval = cpu_to_le16(0x0060);
        cp.scan_window = cpu_to_le16(0x0030);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.conn_interval_min = cpu_to_le16(0x0028);
        cp.conn_interval_max = cpu_to_le16(0x0038);
        cp.supervision_timeout = cpu_to_le16(0x002a);
        cp.min_ce_len = cpu_to_le16(0x0000);
        cp.max_ce_len = cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                          cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                if (ie->data.ssp_mode > 0)
                        set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

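/* hci_setup_sync() requests an (e)SCO link with 64 kbit/s air bandwidth in
 * each direction (0x1f40 = 8000 bytes/s), while max_latency = 0xffff and
 * retrans_effort = 0xff leave latency and retransmission effort up to the
 * controller ("don't care" per the HCI specification).
 */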
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = cpu_to_le32(0x00001f40);
        cp.max_latency = cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = cpu_to_le16(0x0001);
        cp.max_ce_len = cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_conn_update);
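/* Illustrative sketch (not part of this file): a caller that has agreed on
 * new LE connection parameters, e.g. through the L2CAP connection parameter
 * update procedure, could apply them with
 *
 *      hci_le_conn_update(conn, 0x0010, 0x0020, 0x0000, 0x01f4);
 *
 * i.e. a 20-40 ms connection interval, zero slave latency and a 5 second
 * supervision timeout, using the same units as hci_le_connect() above.
 */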

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                      __u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));
        cp.ediv = ediv;
        memcpy(cp.rand, rand, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
EXPORT_SYMBOL(hci_le_start_enc);

void hci_le_ltk_neg_reply(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_ltk_neg_reply cp;

        BT_DBG("%p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);

        hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        BT_DBG("%p", conn);

        if (!sco)
                return;

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

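/* hci_conn_timeout() is the deferred-disconnect worker behind disc_work,
 * scheduled from hci_conn_put() when the connection reference count drops.
 * Once the count is really zero it either cancels a still-pending outgoing
 * connect attempt or sends an HCI Disconnect for an established link.
 */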
static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
        __u8 reason;

        BT_DBG("conn %p state %s", conn, state_to_string(conn->state));

        if (atomic_read(&conn->refcnt))
                return;

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_connect_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_connect_cancel(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
}

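/* Allocate a new hci_conn for the given remote address, initialise it in
 * BT_OPEN state with a type-specific packet type mask, register it in the
 * connection hash and take a reference on the hci_dev. Returns NULL on
 * allocation failure; callers such as hci_connect() run with the device
 * locked.
 */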
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;

        set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                    (unsigned long) conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        return conn;
}

int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        cancel_delayed_work_sync(&conn->disc_work);

        del_timer(&conn->auto_accept_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        hci_chan_list_flush(conn);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        if (conn->handle == 0)
                kfree(conn);

        return 0;
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 * No source address - find interface with bdaddr != dst
                 * Source address - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);
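/* Usage note: hci_get_route() hands back the adapter with a reference taken
 * via hci_dev_hold(), so callers must balance it with hci_dev_put(). A
 * minimal sketch (an assumption about typical protocol callers, not code
 * from this file):
 *
 *      struct hci_dev *hdev = hci_get_route(&dst, BDADDR_ANY);
 *      if (!hdev)
 *              return -EHOSTUNREACH;
 *      ...
 *      hci_dev_put(hdev);
 */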

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;
        struct hci_conn *le;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (type == LE_LINK) {
                le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
                if (!le) {
                        le = hci_conn_add(hdev, LE_LINK, dst);
                        if (!le)
                                return ERR_PTR(-ENOMEM);

                        le->dst_type = bdaddr_to_le(dst_type);
                        hci_le_connect(le);
                }

                le->pending_sec_level = sec_level;
                le->auth_type = auth_type;

                hci_conn_hold(le);

                return le;
        }

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return ERR_PTR(-ENOMEM);
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
        }

        if (type == ACL_LINK)
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_put(acl);
                        return ERR_PTR(-ENOMEM);
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
            (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
                hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}
EXPORT_SYMBOL(hci_connect);
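/* Minimal sketch of how an upper layer might use hci_connect() (an
 * assumption for illustration; the locals are hypothetical). The function
 * returns an ERR_PTR() on failure and a held connection on success, so the
 * caller eventually drops it with hci_conn_put():
 *
 *      struct hci_conn *hcon;
 *
 *      hci_dev_lock(hdev);
 *      hcon = hci_connect(hdev, ACL_LINK, &dst, 0,
 *                         BT_SECURITY_LOW, HCI_AT_GENERAL_BONDING);
 *      hci_dev_unlock(hdev);
 *      if (IS_ERR(hcon))
 *              return PTR_ERR(hcon);
 *      ...
 *      hci_conn_put(hcon);
 */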

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;

                /* encrypt must be pending if auth is also pending */
                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
        }

        return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 0x01;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                             &cp);
        }
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        /* For sdp we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;

        /* For non 2.1 devices and low security level we don't need the link
           key. */
        if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
                return 1;

        /* For other security levels we need the link key. */
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;

        /* An authenticated combination key has sufficient security for any
           security level. */
        if (conn->key_type == HCI_LK_AUTH_COMBINATION)
                goto encrypt;

        /* An unauthenticated combination key has sufficient security for
           security level 1 and 2. */
        if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
            (sec_level == BT_SECURITY_MEDIUM ||
             sec_level == BT_SECURITY_LOW))
                goto encrypt;

        /* A combination key has always sufficient security for the security
           levels 1 or 2. High security level requires the combination key
           is generated using maximum PIN code length (16).
           For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
            (sec_level != BT_SECURITY_HIGH ||
             conn->pin_length == 16))
                goto encrypt;

auth:
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return 0;

        if (!hci_conn_auth(conn, sec_level, auth_type))
                return 0;

encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        hci_conn_encrypt(conn);
        return 0;
}
EXPORT_SYMBOL(hci_conn_security);
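/* Note on hci_conn_security(): a return of 1 means the link already meets
 * the requested security level and the caller may proceed immediately; a
 * return of 0 means authentication and/or encryption has been initiated
 * (or is already pending) and the caller has to wait for the corresponding
 * security/encryption change events before continuing.
 */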

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
        BT_DBG("conn %p", conn);

        if (sec_level != BT_SECURITY_HIGH)
                return 1; /* Accept if non-secure is required */

        if (conn->sec_level == BT_SECURITY_HIGH)
                return 1;

        return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                             sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF)
                goto timer;

        if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                          jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
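/* hci_conn_enter_active_mode() is the counterpart of the idle machinery
 * above: data activity pulls a connection out of sniff mode (when allowed)
 * and re-arms idle_timer, whose expiry runs hci_conn_idle() and puts the
 * link back into sniff mode via hci_conn_enter_sniff_mode().
 */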

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c, *n;

        BT_DBG("hdev %s", hdev->name);

        list_for_each_entry_safe(c, n, &h->list, list) {
                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_connect(conn);

        hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

int hci_get_conn_list(void __user *arg)
{
        register struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock(hdev);
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}
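/* hci_get_conn_list() backs the HCIGETCONNLIST ioctl on raw HCI sockets.
 * A hypothetical user space sketch (error handling omitted, buffer size for
 * ten entries chosen arbitrarily, dev_id 0 meaning hci0):
 *
 *      struct hci_conn_list_req *cl;
 *      int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      cl = malloc(sizeof(*cl) + 10 * sizeof(struct hci_conn_info));
 *      cl->dev_id = 0;
 *      cl->conn_num = 10;
 *      ioctl(sk, HCIGETCONNLIST, cl);
 */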

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

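/* A struct hci_chan represents one logical channel multiplexed over the
 * connection; its data_q is drained by the HCI TX scheduler in hci_core.c.
 * Channels are added to conn->chan_list under RCU, which is why removal in
 * hci_chan_del() uses list_del_rcu() plus synchronize_rcu().
 */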
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;

        BT_DBG("%s conn %p", hdev->name, conn);

        chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
        if (!chan)
                return NULL;

        chan->conn = conn;
        skb_queue_head_init(&chan->data_q);

        list_add_rcu(&chan->list, &conn->chan_list);

        return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);

        list_del_rcu(&chan->list);

        synchronize_rcu();

        skb_queue_purge(&chan->data_q);
        kfree(chan);

        return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
        struct hci_chan *chan, *n;

        BT_DBG("conn %p", conn);

        list_for_each_entry_safe(chan, n, &conn->chan_list, list)
                hci_chan_del(chan);
}