/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

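/* Issue an HCI Create Connection request for an outgoing ACL link,
 * seeding page scan parameters and clock offset from the inquiry cache
 * when a recent entry for the remote device exists. */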
void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                                        cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                conn->ssp_mode = ie->data.ssp_mode;
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

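/* Cancel a pending outgoing ACL connection attempt.  Create Connection
 * Cancel only exists from Bluetooth 1.2 (HCI version 2) onwards, so do
 * nothing on older controllers. */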
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("%p", conn);

        if (conn->hdev->hci_ver < 2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

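/* Request disconnection of an established link; the controller's
 * Disconnection Complete event finishes the teardown. */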
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

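/* Add a legacy SCO link on top of the ACL connection given by handle. */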
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

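/* Set up a synchronous (eSCO) connection over the given ACL handle with
 * 64 kbit/s bandwidth and "don't care" latency and retransmission effort. */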
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = cpu_to_le32(0x00001f40);
        cp.max_latency = cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

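/* Bring up the SCO/eSCO channel attached to an ACL link once the ACL setup
 * has completed, or drop the pending SCO connection if it failed. */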
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        BT_DBG("%p", conn);

        if (!sco)
                return;

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

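/* Disconnect timer: fires once a connection has been unreferenced for its
 * disconnect timeout.  Cancels connections that are still being set up and
 * disconnects established ones. */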
static void hci_conn_timeout(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;
        __u8 reason;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->type == ACL_LINK && conn->out)
                        hci_acl_connect_cancel(conn);
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }

        hci_dev_unlock(hdev);
}

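/* Idle timer: the link has been inactive for hdev->idle_timeout ms, so try
 * to park it in sniff mode. */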
static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

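/* Allocate and initialise a new connection object, pick the packet types
 * allowed for the link type, set up its timers and register it in the
 * device's connection hash. */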
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;

        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                        (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        tasklet_enable(&hdev->tx_task);

        return conn;
}

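/* Tear down a connection object: stop its timers, detach any linked
 * SCO/ACL peer, return unacknowledged ACL credits to the device and
 * remove it from the connection hash. */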
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        del_timer(&conn->disc_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        tasklet_enable(&hdev->tx_task);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        return 0;
}

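/* Pick the local adapter for a new connection: the one matching src when a
 * source address is given, otherwise the first adapter that is up and whose
 * own address differs from dst. */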
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock_bh(&hci_dev_list_lock);

        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);

                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock_bh(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
                if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_connect(acl);
        } else {
                if (acl->sec_level < sec_level)
                        acl->sec_level = sec_level;
                if (acl->auth_type < auth_type)
                        acl->auth_type = auth_type;
        }

        if (type == ACL_LINK)
                return acl;

        if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
                if (!(sco = hci_conn_add(hdev, type, dst))) {
                        hci_conn_put(acl);
                        return NULL;
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
                        (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                acl->power_save = 1;
                hci_conn_enter_active_mode(acl);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
                                        !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (sec_level > conn->sec_level)
                conn->sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                                                        sizeof(cp), &cp);
        }

        return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("conn %p", conn);

        if (sec_level == BT_SECURITY_SDP)
                return 1;

        if (sec_level == BT_SECURITY_LOW &&
                        (!conn->ssp_mode || !conn->hdev->ssp_mode))
                return 1;

        if (conn->link_mode & HCI_LM_ENCRYPT)
                return hci_conn_auth(conn, sec_level, auth_type);

        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (hci_conn_auth(conn, sec_level, auth_type)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
                                                        sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                                                        sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                        jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = cpu_to_le16(0);
                cp.min_remote_timeout = cpu_to_le16(0);
                cp.min_local_timeout = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = cpu_to_le16(4);
                cp.timeout = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;

        BT_DBG("hdev %s", hdev->name);

        p = h->list.next;
        while (p != &h->list) {
                struct hci_conn *c;

                c = list_entry(p, struct hci_conn, list);
                p = p->next;

                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, 0x16);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_connect(conn);

        hci_dev_unlock(hdev);
}

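/* Take a reference on the connection's device (sysfs) object. */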
void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

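/* Drop a device reference; the sysfs entry is removed on the last put. */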
void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

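/* HCIGETCONNLIST ioctl helper: copy information about up to conn_num
 * connections of the requested device back to user space. */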
int hci_get_conn_list(void __user *arg)
{
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        struct list_head *p;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        if (!(cl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        if (!(hdev = hci_dev_get(req.dev_id))) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock_bh(hdev);
        list_for_each(p, &hdev->conn_hash.list) {
                register struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type  = c->type;
                (ci + n)->out   = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock_bh(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

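/* HCIGETCONNINFO ioctl helper: report the state of the connection matching
 * the requested type and Bluetooth address. */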
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type  = conn->type;
                ci.out   = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

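/* HCIGETAUTHINFO ioctl helper: return the authentication type of the ACL
 * connection to the given Bluetooth address. */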
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}