/* Extraction artifact: gitweb blame header for net/bluetooth/hci_event.c
 * (mirror_ubuntu-bionic-kernel.git, commit "Bluetooth: Fix requiring SMP
 * MITM for outgoing connections") — not part of the source file itself.
 */
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35
36/* Handle HCI Event packets */
37
38static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39{
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52}
53
54static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55{
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64}
65
66static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67{
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78}
79
/* Remote Name Request Cancel command complete: no state to update here,
 * the handler exists only for debug tracing.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
85
86static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87{
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107}
108
109static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110{
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126}
127
128static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129{
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150}
151
152static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154{
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163}
164
165static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167{
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179}
180
/* HCI Reset command complete.
 *
 * Clears HCI_RESET and reverts all state that does not survive a
 * controller reset: non-persistent dev_flags, discovery state, cached
 * TX power levels, advertising/scan-response data and the LE scan type.
 * Note that the reset is applied regardless of the reported status.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
206
207static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
208{
209 __u8 status = *((__u8 *) skb->data);
210 void *sent;
211
212 BT_DBG("%s status 0x%2.2x", hdev->name, status);
213
214 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
215 if (!sent)
216 return;
217
218 hci_dev_lock(hdev);
219
220 if (test_bit(HCI_MGMT, &hdev->dev_flags))
221 mgmt_set_local_name_complete(hdev, sent, status);
222 else if (!status)
223 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
224
225 hci_dev_unlock(hdev);
226}
227
228static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229{
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239}
240
241static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242{
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263}
264
265static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
266{
267 __u8 status = *((__u8 *) skb->data);
268 void *sent;
269
270 BT_DBG("%s status 0x%2.2x", hdev->name, status);
271
272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
273 if (!sent)
274 return;
275
276 if (!status) {
277 __u8 param = *((__u8 *) sent);
278
279 if (param)
280 set_bit(HCI_ENCRYPT, &hdev->flags);
281 else
282 clear_bit(HCI_ENCRYPT, &hdev->flags);
283 }
284}
285
/* Write Scan Enable command complete.
 *
 * On failure, only mgmt is informed and the discoverable timeout is
 * reset.  On success, the ISCAN/PSCAN flags are re-derived from the
 * parameter that was actually sent, and mgmt is notified about any
 * transition of the discoverable/connectable state.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	/* Capture the previous state so mgmt is only told about changes */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
333
334static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335{
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347}
348
349static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
350{
351 __u8 status = *((__u8 *) skb->data);
352 void *sent;
353
354 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
357 if (!sent)
358 return;
359
360 hci_dev_lock(hdev);
361
362 if (status == 0)
363 memcpy(hdev->dev_class, sent, 3);
364
365 if (test_bit(HCI_MGMT, &hdev->dev_flags))
366 mgmt_set_class_of_dev_complete(hdev, sent, status);
367
368 hci_dev_unlock(hdev);
369}
370
371static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372{
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392}
393
394static void hci_cc_write_voice_setting(struct hci_dev *hdev,
395 struct sk_buff *skb)
396{
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421}
422
423static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
424 struct sk_buff *skb)
425{
426 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
427
428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429
430 if (rp->status)
431 return;
432
433 hdev->num_iac = rp->num_iac;
434
435 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
436}
437
438static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
439{
440 __u8 status = *((__u8 *) skb->data);
441 struct hci_cp_write_ssp_mode *sent;
442
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
446 if (!sent)
447 return;
448
449 if (!status) {
450 if (sent->mode)
451 hdev->features[1][0] |= LMP_HOST_SSP;
452 else
453 hdev->features[1][0] &= ~LMP_HOST_SSP;
454 }
455
456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
457 mgmt_ssp_enable_complete(hdev, sent->mode, status);
458 else if (!status) {
459 if (sent->mode)
460 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 else
462 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 }
464}
465
466static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
467{
468 u8 status = *((u8 *) skb->data);
469 struct hci_cp_write_sc_support *sent;
470
471 BT_DBG("%s status 0x%2.2x", hdev->name, status);
472
473 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
474 if (!sent)
475 return;
476
477 if (!status) {
478 if (sent->support)
479 hdev->features[1][0] |= LMP_HOST_SC;
480 else
481 hdev->features[1][0] &= ~LMP_HOST_SC;
482 }
483
484 if (test_bit(HCI_MGMT, &hdev->dev_flags))
485 mgmt_sc_enable_complete(hdev, sent->support, status);
486 else if (!status) {
487 if (sent->support)
488 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489 else
490 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
491 }
492}
493
494static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
495{
496 struct hci_rp_read_local_version *rp = (void *) skb->data;
497
498 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
499
500 if (rp->status)
501 return;
502
503 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
504 hdev->hci_ver = rp->hci_ver;
505 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
506 hdev->lmp_ver = rp->lmp_ver;
507 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
508 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
509 }
510}
511
512static void hci_cc_read_local_commands(struct hci_dev *hdev,
513 struct sk_buff *skb)
514{
515 struct hci_rp_read_local_commands *rp = (void *) skb->data;
516
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
518
519 if (rp->status)
520 return;
521
522 if (test_bit(HCI_SETUP, &hdev->dev_flags))
523 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
524}
525
/* Read Local Supported Features command complete.
 *
 * Caches the 8-byte LMP feature page 0 and translates the relevant
 * feature bits into the ACL packet types and (e)SCO link types the
 * host may use with this controller.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
575
576static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
577 struct sk_buff *skb)
578{
579 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
580
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582
583 if (rp->status)
584 return;
585
586 if (hdev->max_page < rp->max_page)
587 hdev->max_page = rp->max_page;
588
589 if (rp->page < HCI_MAX_PAGES)
590 memcpy(hdev->features[rp->page], rp->features, 8);
591}
592
593static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
594 struct sk_buff *skb)
595{
596 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
597
598 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
599
600 if (!rp->status)
601 hdev->flow_ctl_mode = rp->mode;
602}
603
604static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
605{
606 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
607
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
609
610 if (rp->status)
611 return;
612
613 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
614 hdev->sco_mtu = rp->sco_mtu;
615 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
616 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
617
618 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
619 hdev->sco_mtu = 64;
620 hdev->sco_pkts = 8;
621 }
622
623 hdev->acl_cnt = hdev->acl_pkts;
624 hdev->sco_cnt = hdev->sco_pkts;
625
626 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
627 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
628}
629
630static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
631{
632 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
633
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635
636 if (!rp->status)
637 bacpy(&hdev->bdaddr, &rp->bdaddr);
638}
639
640static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
641 struct sk_buff *skb)
642{
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
644
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
646
647 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
648 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
649 hdev->page_scan_window = __le16_to_cpu(rp->window);
650 }
651}
652
653static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
654 struct sk_buff *skb)
655{
656 u8 status = *((u8 *) skb->data);
657 struct hci_cp_write_page_scan_activity *sent;
658
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
660
661 if (status)
662 return;
663
664 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
665 if (!sent)
666 return;
667
668 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
669 hdev->page_scan_window = __le16_to_cpu(sent->window);
670}
671
672static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
673 struct sk_buff *skb)
674{
675 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
676
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678
679 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
680 hdev->page_scan_type = rp->type;
681}
682
683static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
684 struct sk_buff *skb)
685{
686 u8 status = *((u8 *) skb->data);
687 u8 *type;
688
689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
690
691 if (status)
692 return;
693
694 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
695 if (type)
696 hdev->page_scan_type = *type;
697}
698
699static void hci_cc_read_data_block_size(struct hci_dev *hdev,
700 struct sk_buff *skb)
701{
702 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
703
704 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
705
706 if (rp->status)
707 return;
708
709 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
710 hdev->block_len = __le16_to_cpu(rp->block_len);
711 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
712
713 hdev->block_cnt = hdev->num_blocks;
714
715 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
716 hdev->block_cnt, hdev->block_len);
717}
718
719static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
720 struct sk_buff *skb)
721{
722 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
723
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725
726 if (rp->status)
727 goto a2mp_rsp;
728
729 hdev->amp_status = rp->amp_status;
730 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
731 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
732 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
733 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
734 hdev->amp_type = rp->amp_type;
735 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
736 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
737 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
738 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
739
740a2mp_rsp:
741 a2mp_send_getinfo_rsp(hdev);
742}
743
744static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
745 struct sk_buff *skb)
746{
747 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
748 struct amp_assoc *assoc = &hdev->loc_assoc;
749 size_t rem_len, frag_len;
750
751 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
752
753 if (rp->status)
754 goto a2mp_rsp;
755
756 frag_len = skb->len - sizeof(*rp);
757 rem_len = __le16_to_cpu(rp->rem_len);
758
759 if (rem_len > frag_len) {
760 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
761
762 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
763 assoc->offset += frag_len;
764
765 /* Read other fragments */
766 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
767
768 return;
769 }
770
771 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
772 assoc->len = assoc->offset + rem_len;
773 assoc->offset = 0;
774
775a2mp_rsp:
776 /* Send A2MP Rsp when all fragments are received */
777 a2mp_send_getampassoc_rsp(hdev, rp->status);
778 a2mp_send_create_phy_link_req(hdev, rp->status);
779}
780
781static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
782 struct sk_buff *skb)
783{
784 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
785
786 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
787
788 if (!rp->status)
789 hdev->inq_tx_power = rp->tx_power;
790}
791
792static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
793{
794 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
795 struct hci_cp_pin_code_reply *cp;
796 struct hci_conn *conn;
797
798 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
799
800 hci_dev_lock(hdev);
801
802 if (test_bit(HCI_MGMT, &hdev->dev_flags))
803 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
804
805 if (rp->status)
806 goto unlock;
807
808 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
809 if (!cp)
810 goto unlock;
811
812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
813 if (conn)
814 conn->pin_length = cp->pin_len;
815
816unlock:
817 hci_dev_unlock(hdev);
818}
819
820static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
821{
822 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
823
824 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
825
826 hci_dev_lock(hdev);
827
828 if (test_bit(HCI_MGMT, &hdev->dev_flags))
829 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
830 rp->status);
831
832 hci_dev_unlock(hdev);
833}
834
835static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
836 struct sk_buff *skb)
837{
838 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
839
840 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
841
842 if (rp->status)
843 return;
844
845 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
846 hdev->le_pkts = rp->le_max_pkt;
847
848 hdev->le_cnt = hdev->le_pkts;
849
850 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
851}
852
853static void hci_cc_le_read_local_features(struct hci_dev *hdev,
854 struct sk_buff *skb)
855{
856 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
857
858 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
859
860 if (!rp->status)
861 memcpy(hdev->le_features, rp->features, 8);
862}
863
864static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
865 struct sk_buff *skb)
866{
867 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870
871 if (!rp->status)
872 hdev->adv_tx_power = rp->tx_power;
873}
874
875static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
876{
877 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
878
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
880
881 hci_dev_lock(hdev);
882
883 if (test_bit(HCI_MGMT, &hdev->dev_flags))
884 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
885 rp->status);
886
887 hci_dev_unlock(hdev);
888}
889
890static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
891 struct sk_buff *skb)
892{
893 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
894
895 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
896
897 hci_dev_lock(hdev);
898
899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
900 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
901 ACL_LINK, 0, rp->status);
902
903 hci_dev_unlock(hdev);
904}
905
906static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
907{
908 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911
912 hci_dev_lock(hdev);
913
914 if (test_bit(HCI_MGMT, &hdev->dev_flags))
915 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
916 0, rp->status);
917
918 hci_dev_unlock(hdev);
919}
920
921static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
922 struct sk_buff *skb)
923{
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
925
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
927
928 hci_dev_lock(hdev);
929
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
932 ACL_LINK, 0, rp->status);
933
934 hci_dev_unlock(hdev);
935}
936
937static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
938 struct sk_buff *skb)
939{
940 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
941
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943
944 hci_dev_lock(hdev);
945 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
946 NULL, NULL, rp->status);
947 hci_dev_unlock(hdev);
948}
949
950static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
951 struct sk_buff *skb)
952{
953 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
954
955 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
956
957 hci_dev_lock(hdev);
958 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
959 rp->hash256, rp->randomizer256,
960 rp->status);
961 hci_dev_unlock(hdev);
962}
963
964
965static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
966{
967 __u8 status = *((__u8 *) skb->data);
968 bdaddr_t *sent;
969
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
971
972 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
973 if (!sent)
974 return;
975
976 hci_dev_lock(hdev);
977
978 if (!status)
979 bacpy(&hdev->random_addr, sent);
980
981 hci_dev_unlock(hdev);
982}
983
/* LE Set Advertising Enable command complete.
 *
 * On success, arms a connection timeout for a pending peripheral-role
 * LE connection (advertising was enabled to let the peer connect) and
 * reports the new advertising state to mgmt.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1016
1017static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1018{
1019 struct hci_cp_le_set_scan_param *cp;
1020 __u8 status = *((__u8 *) skb->data);
1021
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1025 if (!cp)
1026 return;
1027
1028 hci_dev_lock(hdev);
1029
1030 if (!status)
1031 hdev->le_scan_type = cp->type;
1032
1033 hci_dev_unlock(hdev);
1034}
1035
1036static bool has_pending_adv_report(struct hci_dev *hdev)
1037{
1038 struct discovery_state *d = &hdev->discovery;
1039
1040 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1041}
1042
1043static void clear_pending_adv_report(struct hci_dev *hdev)
1044{
1045 struct discovery_state *d = &hdev->discovery;
1046
1047 bacpy(&d->last_adv_addr, BDADDR_ANY);
1048 d->last_adv_data_len = 0;
1049}
1050
1051static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1052 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
1053{
1054 struct discovery_state *d = &hdev->discovery;
1055
1056 bacpy(&d->last_adv_addr, bdaddr);
1057 d->last_adv_addr_type = bdaddr_type;
1058 d->last_adv_rssi = rssi;
1059 memcpy(d->last_adv_data, data, len);
1060 d->last_adv_data_len = len;
1061}
1062
/* LE Set Scan Enable command complete.
 *
 * On enable: sets HCI_LE_SCAN and, for active scans, drops any stale
 * pending advertising report.  On disable: flushes a pending report to
 * mgmt, cancels the scan-disable timer, clears HCI_LE_SCAN, and marks
 * discovery stopped if scanning was interrupted by a connect request.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1120
1121static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1122 struct sk_buff *skb)
1123{
1124 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1125
1126 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1127
1128 if (!rp->status)
1129 hdev->le_white_list_size = rp->size;
1130}
1131
1132static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1133 struct sk_buff *skb)
1134{
1135 __u8 status = *((__u8 *) skb->data);
1136
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1138
1139 if (!status)
1140 hci_white_list_clear(hdev);
1141}
1142
1143static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1144 struct sk_buff *skb)
1145{
1146 struct hci_cp_le_add_to_white_list *sent;
1147 __u8 status = *((__u8 *) skb->data);
1148
1149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1150
1151 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1152 if (!sent)
1153 return;
1154
1155 if (!status)
1156 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1157}
1158
1159static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1160 struct sk_buff *skb)
1161{
1162 struct hci_cp_le_del_from_white_list *sent;
1163 __u8 status = *((__u8 *) skb->data);
1164
1165 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1166
1167 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1168 if (!sent)
1169 return;
1170
1171 if (!status)
1172 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1173}
1174
1175static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1176 struct sk_buff *skb)
1177{
1178 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1179
1180 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1181
1182 if (!rp->status)
1183 memcpy(hdev->le_states, rp->le_states, 8);
1184}
1185
/* Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE and Simultaneous LE+BR/EDR host-support bits
 * that were written to the controller into the cached host features
 * (features page 1) and the corresponding dev_flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le) {
			hdev->features[1][0] |= LMP_HOST_LE;
			set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		} else {
			hdev->features[1][0] &= ~LMP_HOST_LE;
			/* With LE disabled, advertising cannot stay enabled */
			clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
		}

		if (sent->simul)
			hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		else
			hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
	}
}
1214
1215static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1216{
1217 struct hci_cp_le_set_adv_param *cp;
1218 u8 status = *((u8 *) skb->data);
1219
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1221
1222 if (status)
1223 return;
1224
1225 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1226 if (!cp)
1227 return;
1228
1229 hci_dev_lock(hdev);
1230 hdev->adv_addr_type = cp->own_address_type;
1231 hci_dev_unlock(hdev);
1232}
1233
1234static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1236{
1237 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1238
1239 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1240 hdev->name, rp->status, rp->phy_handle);
1241
1242 if (rp->status)
1243 return;
1244
1245 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1246}
1247
1248static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1249{
1250 struct hci_rp_read_rssi *rp = (void *) skb->data;
1251 struct hci_conn *conn;
1252
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255 if (rp->status)
1256 return;
1257
1258 hci_dev_lock(hdev);
1259
1260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1261 if (conn)
1262 conn->rssi = rp->rssi;
1263
1264 hci_dev_unlock(hdev);
1265}
1266
1267static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1268{
1269 struct hci_cp_read_tx_power *sent;
1270 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1271 struct hci_conn *conn;
1272
1273 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1274
1275 if (rp->status)
1276 return;
1277
1278 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1279 if (!sent)
1280 return;
1281
1282 hci_dev_lock(hdev);
1283
1284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1285 if (!conn)
1286 goto unlock;
1287
1288 switch (sent->type) {
1289 case 0x00:
1290 conn->tx_power = rp->tx_power;
1291 break;
1292 case 0x01:
1293 conn->max_tx_power = rp->tx_power;
1294 break;
1295 }
1296
1297unlock:
1298 hci_dev_unlock(hdev);
1299}
1300
1301static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1302{
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1304
1305 if (status) {
1306 hci_conn_check_pending(hdev);
1307 return;
1308 }
1309
1310 set_bit(HCI_INQUIRY, &hdev->flags);
1311}
1312
/* Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down the pending outgoing ACL connection — except for
 * status 0x0c (Command Disallowed) on early attempts, where the connection
 * is parked in BT_CONNECT2 so it can be retried. On success, make sure a
 * hci_conn object exists for the outgoing connection.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the peer address from the sent command */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: keep the connection
			 * around for a retry unless we already tried twice.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1352
/* Command Status for HCI_OP_ADD_SCO.
 *
 * Only a failure status needs handling: find the ACL connection the SCO
 * was being added to (cp->handle is the ACL handle) and tear down the
 * pending SCO link hanging off it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1387
/* Command Status for HCI_OP_AUTH_REQUESTED.
 *
 * Only a failure status needs handling: if the connection is still in the
 * configuration phase, report the failure to the upper protocol and drop
 * the reference held for the config phase.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1414
/* Command Status for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only a failure status needs handling: if the connection is still in the
 * configuration phase, report the failure to the upper protocol and drop
 * the reference held for the config phase.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1441
1442static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1443 struct hci_conn *conn)
1444{
1445 if (conn->state != BT_CONFIG || !conn->out)
1446 return 0;
1447
1448 if (conn->pending_sec_level == BT_SECURITY_SDP)
1449 return 0;
1450
1451 /* Only request authentication for SSP connections or non-SSP
1452 * devices with sec_level MEDIUM or HIGH or if MITM protection
1453 * is requested.
1454 */
1455 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1456 conn->pending_sec_level != BT_SECURITY_FIPS &&
1457 conn->pending_sec_level != BT_SECURITY_HIGH &&
1458 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1459 return 0;
1460
1461 return 1;
1462}
1463
1464static int hci_resolve_name(struct hci_dev *hdev,
1465 struct inquiry_entry *e)
1466{
1467 struct hci_cp_remote_name_req cp;
1468
1469 memset(&cp, 0, sizeof(cp));
1470
1471 bacpy(&cp.bdaddr, &e->data.bdaddr);
1472 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1473 cp.pscan_mode = e->data.pscan_mode;
1474 cp.clock_offset = e->data.clock_offset;
1475
1476 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1477}
1478
1479static bool hci_resolve_next_name(struct hci_dev *hdev)
1480{
1481 struct discovery_state *discov = &hdev->discovery;
1482 struct inquiry_entry *e;
1483
1484 if (list_empty(&discov->resolve))
1485 return false;
1486
1487 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1488 if (!e)
1489 return false;
1490
1491 if (hci_resolve_name(hdev, e) == 0) {
1492 e->name_state = NAME_PENDING;
1493 return true;
1494 }
1495
1496 return false;
1497}
1498
/* Process a resolved (or failed) remote name during discovery.
 *
 * Reports the device as connected to mgmt if needed, then updates the
 * discovery state machine: records the name result for the matching
 * NAME_PENDING cache entry, starts resolving the next name, and marks
 * discovery stopped when nothing is left to resolve. A NULL name means
 * resolution failed for this device.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1541
/* Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failure status is handled here: update the pending-name
 * discovery bookkeeping and, since the name request will never complete,
 * start any authentication the outgoing connection still needs.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (NULL name) to the mgmt layer */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1582
/* Command Status for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only a failure status needs handling: if the connection is still in the
 * configuration phase, report the failure to the upper protocol and drop
 * the reference held for the config phase.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1609
/* Command Status for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Only a failure status needs handling: if the connection is still in the
 * configuration phase, report the failure to the upper protocol and drop
 * the reference held for the config phase.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1636
/* Command Status for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only a failure status needs handling: find the ACL connection the
 * synchronous link was being set up on (cp->handle is the ACL handle)
 * and tear down the pending SCO/eSCO link hanging off it.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1671
/* Command Status for HCI_OP_SNIFF_MODE.
 *
 * Only a failure status needs handling: clear the pending mode-change
 * flag and, if a SCO setup was waiting on the mode change, let it
 * proceed (hci_sco_setup sees the failure status).
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1698
/* Command Status for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Only a failure status needs handling: clear the pending mode-change
 * flag and, if a SCO setup was waiting on the mode change, let it
 * proceed (hci_sco_setup sees the failure status).
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1725
1726static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1727{
1728 struct hci_cp_disconnect *cp;
1729 struct hci_conn *conn;
1730
1731 if (!status)
1732 return;
1733
1734 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1735 if (!cp)
1736 return;
1737
1738 hci_dev_lock(hdev);
1739
1740 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1741 if (conn)
1742 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1743 conn->dst_type, status);
1744
1745 hci_dev_unlock(hdev);
1746}
1747
/* Command Status for HCI_OP_CREATE_PHY_LINK (AMP physical link).
 *
 * On failure, delete the connection object created for the physical
 * link; on success, start writing the remote AMP assoc data.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1772
/* Command Status for HCI_OP_ACCEPT_PHY_LINK (AMP physical link).
 *
 * On success, start writing the remote AMP assoc data for the accepted
 * physical link; failures need no action here.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}
1788
/* Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information needed
 * by SMP on the pending LE connection and arm a connection timeout when
 * the controller is connecting to an explicit peer address.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1839
/* Command Status for HCI_OP_LE_START_ENC.
 *
 * Only a failure status needs handling: if the link is still connected,
 * starting encryption failed, so disconnect it with an authentication
 * failure reason. Success is followed by an Encryption Change event.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1869
/* Handle the Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag and wakes any waiters, then — when mgmt
 * is in use and discovery was in the finding phase — either moves on to
 * resolving names of discovered devices or marks discovery stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first name that is still needed */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1910
/* Handle the Inquiry Result event.
 *
 * The event carries a response count byte followed by that many
 * inquiry_info records. Each record updates the inquiry cache and is
 * reported to the mgmt layer as a found device. Results are ignored
 * while periodic inquiry is active.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry neither RSSI nor SSP info */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, !name_known, ssp, NULL,
				  0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
1947
/* Handle the Connection Complete event (BR/EDR ACL and SCO links).
 *
 * On success: stores the handle, moves the connection into BT_CONFIG
 * (ACL) or BT_CONNECTED (SCO), applies device-wide auth/encrypt link
 * modes, kicks off remote feature discovery for ACL links, and fixes up
 * the packet type for pre-2.0 controllers. On failure: closes the
 * connection and reports the failure. Finally, any pending SCO setup on
 * an ACL link is continued and pending connection attempts re-checked.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO request may have been upgraded to eSCO locally;
		 * fall back to looking up (and re-typing) the eSCO entry.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give incoming legacy (non-SSP) connections with
			 * no stored link key extra time for pairing.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2029
/* Handle the Connection Request event (incoming BR/EDR connection).
 *
 * Asks the upper protocols whether to accept, rejects blacklisted
 * addresses, then either accepts the ACL/SCO request immediately, sends
 * an Accept Synchronous Connection Request for eSCO, or defers the
 * decision to the protocol layer (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or modify the accept decision */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class from the request */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Decision deferred to the protocol layer */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2111
2112static u8 hci_to_mgmt_reason(u8 err)
2113{
2114 switch (err) {
2115 case HCI_ERROR_CONNECTION_TIMEOUT:
2116 return MGMT_DEV_DISCONN_TIMEOUT;
2117 case HCI_ERROR_REMOTE_USER_TERM:
2118 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2119 case HCI_ERROR_REMOTE_POWER_OFF:
2120 return MGMT_DEV_DISCONN_REMOTE;
2121 case HCI_ERROR_LOCAL_HOST_TERM:
2122 return MGMT_DEV_DISCONN_LOCAL_HOST;
2123 default:
2124 return MGMT_DEV_DISCONN_UNKNOWN;
2125 }
2126}
2127
/* Handle the Disconnection Complete event.
 *
 * Reports the disconnection (or failed disconnect attempt) to the mgmt
 * layer, removes flush-marked link keys, re-queues auto-connect
 * candidates, tears down the connection object, and re-enables LE
 * advertising when an LE link went away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report to mgmt if it was told about the connection */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK && conn->flush_key)
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for devices configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the conn object is freed */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2198
/* Handle the Authentication Complete event.
 *
 * On success, records the achieved security level (except for attempted
 * re-authentication of legacy, non-SSP devices, which is not possible).
 * Depending on the connection phase, continues with encryption setup
 * (SSP config phase) or completes the connection, and services any
 * pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Service an encryption request that was waiting on authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2264
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the resolved name (or NULL on failure) into the discovery name
 * resolution machinery when mgmt is in use, then starts authentication
 * on the matching outgoing connection if it is still required.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2303
/* Handle the Encryption Change event.
 *
 * Updates the link-mode encryption/authentication state (including the
 * AES-CCM flag for BR/EDR Secure Connections and LE links), disconnects
 * connected links on encryption failure, enforces Secure Connections
 * Only mode during the config phase, and notifies the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			conn->link_mode |= HCI_LM_AUTH;
			conn->link_mode |= HCI_LM_ENCRYPT;
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				conn->link_mode |= HCI_LM_FIPS;

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			conn->link_mode &= ~HCI_LM_ENCRYPT;
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2369
/* Handle the Change Connection Link Key Complete event.
 *
 * On success, marks the link as secured; in all cases clears the pending
 * authentication flag and notifies upper layers of the key change.
 */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2392
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * On success the remote LMP feature page 0 is cached on the connection.
 * While the connection is still in BT_CONFIG, setup continues: if both
 * sides are SSP capable the extended feature page 1 is requested next,
 * otherwise the remote name is resolved (or mgmt is notified of the new
 * connection) and the connection is completed unless outgoing
 * authentication is still required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides support SSP: fetch extended feature page 1 before
	 * finishing connection setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the device connected
	 * to mgmt; otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2442
/* Dispatch an HCI Command Complete event to the matching hci_cc_* handler.
 *
 * The command status is read from the first parameter byte following the
 * event header, before the header is pulled off the skb.  After dispatch
 * the command timeout timer is cancelled (except for NOP events), the
 * pending request is completed and, if the controller signals that it
 * can accept more commands (ev->ncmd), the command queue is kicked again.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* NOP completions only report free command slots; they never
	 * correspond to a command we sent, so keep the timer running.
	 */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2718
/* Dispatch an HCI Command Status event to the matching hci_cs_* handler.
 *
 * Unlike Command Complete, the pending request is only marked complete
 * here when the command failed, or when the sent command is not waiting
 * for a further event to signal its completion.  As above, the command
 * timer is cancelled for real commands and the queue is kicked when the
 * controller reports free command slots.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the command does
	 * not expect a dedicated completion event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2811
2812static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2813{
2814 struct hci_ev_role_change *ev = (void *) skb->data;
2815 struct hci_conn *conn;
2816
2817 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2818
2819 hci_dev_lock(hdev);
2820
2821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2822 if (conn) {
2823 if (!ev->status) {
2824 if (ev->role)
2825 conn->link_mode &= ~HCI_LM_MASTER;
2826 else
2827 conn->link_mode |= HCI_LM_MASTER;
2828 }
2829
2830 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2831
2832 hci_role_switch_cfm(conn, ev->status, ev->role);
2833 }
2834
2835 hci_dev_unlock(hdev);
2836}
2837
/* Handle HCI Number Of Completed Packets event (packet-based flow control).
 *
 * Returns transmit credits for each reported connection handle, capping
 * every counter at the controller-advertised buffer count.  LE links
 * fall back to the ACL credit pool when the controller has no dedicated
 * LE buffers (hdev->le_pkts == 0).  TX work is requeued at the end so
 * the freed credits get used.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the claimed handle count fits in the payload */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without dedicated LE buffers, LE traffic shares
			 * the ACL credit pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2903
2904static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2905 __u16 handle)
2906{
2907 struct hci_chan *chan;
2908
2909 switch (hdev->dev_type) {
2910 case HCI_BREDR:
2911 return hci_conn_hash_lookup_handle(hdev, handle);
2912 case HCI_AMP:
2913 chan = hci_chan_lookup_handle(hdev, handle);
2914 if (chan)
2915 return chan->conn;
2916 break;
2917 default:
2918 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2919 break;
2920 }
2921
2922 return NULL;
2923}
2924
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control).
 *
 * Returns transmit block credits per reported handle; ACL and AMP links
 * share the single block pool, capped at the controller-advertised
 * num_blocks.  TX work is requeued at the end to consume the credits.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the claimed handle count fits in the payload */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2974
2975static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976{
2977 struct hci_ev_mode_change *ev = (void *) skb->data;
2978 struct hci_conn *conn;
2979
2980 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2981
2982 hci_dev_lock(hdev);
2983
2984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2985 if (conn) {
2986 conn->mode = ev->mode;
2987
2988 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2989 &conn->flags)) {
2990 if (conn->mode == HCI_CM_ACTIVE)
2991 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2992 else
2993 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2994 }
2995
2996 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2997 hci_sco_setup(conn, ev->status);
2998 }
2999
3000 hci_dev_unlock(hdev);
3001}
3002
/* Handle HCI PIN Code Request event.
 *
 * Refreshes the disconnect timeout of an established connection to the
 * pairing timeout, then either rejects the request when the device is
 * not pairable, or forwards it to user space via mgmt, flagging whether
 * a secure PIN is wanted for a pending BT_SECURITY_HIGH level.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* The hold/drop pair only serves to bump disc_timeout */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3039
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it unless a
 * policy check forbids its use: debug keys are only usable with
 * HCI_DEBUG_KEYS set, unauthenticated keys are refused when the pending
 * authentication requires MITM protection, and short-PIN combination
 * keys are refused for high security.  Without a usable key, a negative
 * reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Unauthenticated key vs. a pending auth_type whose MITM
		 * bit (0x01) is set: the key is not good enough.  An
		 * auth_type of 0xff means no requirement is known yet.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3103
/* Handle HCI Link Key Notification event.
 *
 * Caches the new key type on the connection (unless the event merely
 * reports a changed combination key) and persists the key via
 * hci_add_link_key() when mgmt is in use.
 *
 * NOTE(review): conn may be NULL at the hci_add_link_key() call when no
 * matching ACL connection exists — presumably hci_add_link_key() copes
 * with that; confirm against its definition.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* The hold/drop pair only serves to bump disc_timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
3132
3133static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3134{
3135 struct hci_ev_clock_offset *ev = (void *) skb->data;
3136 struct hci_conn *conn;
3137
3138 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3139
3140 hci_dev_lock(hdev);
3141
3142 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3143 if (conn && !ev->status) {
3144 struct inquiry_entry *ie;
3145
3146 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3147 if (ie) {
3148 ie->data.clock_offset = ev->clock_offset;
3149 ie->timestamp = jiffies;
3150 }
3151 }
3152
3153 hci_dev_unlock(hdev);
3154}
3155
3156static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3157{
3158 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3159 struct hci_conn *conn;
3160
3161 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3162
3163 hci_dev_lock(hdev);
3164
3165 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3166 if (conn && !ev->status)
3167 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3168
3169 hci_dev_unlock(hdev);
3170}
3171
3172static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3173{
3174 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3175 struct inquiry_entry *ie;
3176
3177 BT_DBG("%s", hdev->name);
3178
3179 hci_dev_lock(hdev);
3180
3181 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3182 if (ie) {
3183 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3184 ie->timestamp = jiffies;
3185 }
3186
3187 hci_dev_unlock(hdev);
3188}
3189
/* Handle HCI Inquiry Result with RSSI event.
 *
 * The event exists in two wire formats (with and without a pscan_mode
 * byte); they are told apart by dividing the payload size by the number
 * of responses.  Each response updates the inquiry cache and is
 * forwarded to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size mismatch => the variant with pscan_mode */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3249
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Caches the reported feature page and, for page 1, mirrors the remote
 * host's SSP and Secure Connections support into the connection flags.
 * While the connection is still in BT_CONFIG, setup continues the same
 * way as in hci_remote_features_evt(): name resolution or mgmt
 * notification, then completion unless outgoing authentication is
 * needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before reporting the device connected
	 * to mgmt; otherwise report it connected right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3315
/* Handle HCI Synchronous Connection Complete event.
 *
 * If no connection matches the reported link type, the event may belong
 * to an eSCO attempt that ended up as SCO; in that case the pending
 * eSCO connection is re-labelled SCO_LINK.  A set of negotiation
 * failure codes triggers a retry of the outgoing setup with an adjusted
 * packet type before the connection is finally closed.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry the outgoing setup with a different packet type
		 * mix before giving up.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3372
3373static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3374{
3375 size_t parsed = 0;
3376
3377 while (parsed < eir_len) {
3378 u8 field_len = eir[0];
3379
3380 if (field_len == 0)
3381 return parsed;
3382
3383 parsed += field_len + 1;
3384 eir += field_len + 1;
3385 }
3386
3387 return eir_len;
3388}
3389
/* Handle HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data; when it already contains a complete
 * name, name resolution can be skipped.  Responses update the inquiry
 * cache and are reported to mgmt together with their EIR payload.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* A complete name in the EIR data means no separate name
		 * resolution is needed.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3437
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * On success the pending security level becomes effective.  A failed
 * refresh on an established connection triggers a disconnect with an
 * authentication-failure reason.  Connections still in BT_CONFIG are
 * completed; established ones get an auth-complete confirmation and a
 * refreshed disconnect timeout.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* The hold/drop pair only serves to bump disc_timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3481
3482static u8 hci_get_auth_req(struct hci_conn *conn)
3483{
3484 /* If remote requests no-bonding follow that lead */
3485 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3486 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3487 return conn->remote_auth | (conn->auth_type & 0x01);
3488
3489 /* If both remote and local have enough IO capabilities, require
3490 * MITM protection
3491 */
3492 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3493 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3494 return conn->remote_auth | 0x01;
3495
3496 /* No MITM protection possible so ignore remote requirement */
3497 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3498}
3499
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability and authentication requirements when
 * pairing is acceptable (device pairable, or the remote only asks for
 * no-bonding); otherwise sends a negative reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* NOTE(review): this reference has no matching drop in this
	 * function — presumably released when pairing completes; confirm.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Offer OOB data only if we have it for this peer and
		 * either initiated the connection or the remote announced
		 * OOB data as well.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3564
3565static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3566{
3567 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3568 struct hci_conn *conn;
3569
3570 BT_DBG("%s", hdev->name);
3571
3572 hci_dev_lock(hdev);
3573
3574 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3575 if (!conn)
3576 goto unlock;
3577
3578 conn->remote_cap = ev->capability;
3579 conn->remote_auth = ev->authentication;
3580 if (ev->oob_data)
3581 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3582
3583unlock:
3584 hci_dev_unlock(hdev);
3585}
3586
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Without mgmt there is no user-space entity that could answer
	 * the confirmation request, so simply ignore the event.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement encodes whether MITM
	 * protection was requested by the respective side.
	 */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional delay gives other traffic (e.g. a cancel
		 * from user space) a chance to arrive first.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the numeric-comparison value to user space for a decision */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3653
3654static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3655 struct sk_buff *skb)
3656{
3657 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3658
3659 BT_DBG("%s", hdev->name);
3660
3661 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3662 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3663}
3664
3665static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3666 struct sk_buff *skb)
3667{
3668 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3669 struct hci_conn *conn;
3670
3671 BT_DBG("%s", hdev->name);
3672
3673 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3674 if (!conn)
3675 return;
3676
3677 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3678 conn->passkey_entered = 0;
3679
3680 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3681 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3682 conn->dst_type, conn->passkey_notify,
3683 conn->passkey_entered);
3684}
3685
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	/* Track the number of passkey digits the remote side has entered.
	 * STARTED and COMPLETED return directly without notifying user
	 * space; the other cases fall through to the mgmt notification.
	 */
	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	/* Let user space update its passkey UI with the new digit count */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3723
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Balances the hci_conn_hold() taken when the IO capability
	 * request for this pairing was handled.
	 */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3752
3753static void hci_remote_host_features_evt(struct hci_dev *hdev,
3754 struct sk_buff *skb)
3755{
3756 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3757 struct inquiry_entry *ie;
3758 struct hci_conn *conn;
3759
3760 BT_DBG("%s", hdev->name);
3761
3762 hci_dev_lock(hdev);
3763
3764 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3765 if (conn)
3766 memcpy(conn->features[1], ev->features, 8);
3767
3768 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3769 if (ie)
3770 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3771
3772 hci_dev_unlock(hdev);
3773}
3774
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is provisioned via mgmt; without it there is nothing
	 * we could answer the controller with.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		/* With Secure Connections enabled both the P-192 and the
		 * P-256 hash/randomizer pairs are sent in the extended
		 * reply; otherwise only the 192-bit values are used.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		/* No stored OOB data for this device: reject the request */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3825
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	/* A failed physical link tears the AMP connection down again */
	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link inherits its peer address from the BR/EDR
	 * connection it was created for.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* NOTE(review): the hold/drop pair around setting disc_timeout
	 * appears intended to (re)arm the idle-disconnect timer — confirm
	 * against hci_conn_hold()/hci_conn_drop() semantics.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3864
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	/* NOTE(review): unlike the sibling event handlers this lookup is
	 * done without taking hci_dev_lock — confirm this is safe in the
	 * AMP event path.
	 */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The BR/EDR channel switches to the AMP controller's MTU
		 * and is informed that the logical link is up.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3902
3903static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3904 struct sk_buff *skb)
3905{
3906 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3907 struct hci_chan *hchan;
3908
3909 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3910 le16_to_cpu(ev->handle), ev->status);
3911
3912 if (ev->status)
3913 return;
3914
3915 hci_dev_lock(hdev);
3916
3917 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3918 if (!hchan)
3919 goto unlock;
3920
3921 amp_destroy_logical_link(hchan, ev->reason);
3922
3923unlock:
3924 hci_dev_unlock(hdev);
3925}
3926
3927static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3928 struct sk_buff *skb)
3929{
3930 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3931 struct hci_conn *hcon;
3932
3933 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3934
3935 if (ev->status)
3936 return;
3937
3938 hci_dev_lock(hdev);
3939
3940 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3941 if (hcon) {
3942 hcon->state = BT_CLOSED;
3943 hci_conn_del(hcon);
3944 }
3945
3946 hci_dev_unlock(hdev);
3947}
3948
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing connection attempt leaves a hci_conn in BT_CONNECT
	 * state; if none exists, this completion was not initiated
	 * through the normal connect path and a fresh object is created.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The attempt succeeded before the connect timeout fired */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Notify user space exactly once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		set_bit(HCI_CONN_6LOWPAN, &conn->flags);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected now, so drop it from the pending
	 * connection list.
	 */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4052
4053/* This function requires the caller holds hdev->lock */
4054static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4055 u8 addr_type)
4056{
4057 struct hci_conn *conn;
4058 struct smp_irk *irk;
4059
4060 /* If this is a resolvable address, we should resolve it and then
4061 * update address and address type variables.
4062 */
4063 irk = hci_get_irk(hdev, addr, addr_type);
4064 if (irk) {
4065 addr = &irk->bdaddr;
4066 addr_type = irk->addr_type;
4067 }
4068
4069 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
4070 return;
4071
4072 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4073 HCI_AT_NO_BONDING);
4074 if (!IS_ERR(conn))
4075 return;
4076
4077 switch (PTR_ERR(conn)) {
4078 case -EBUSY:
4079 /* If hci_connect() returns -EBUSY it means there is already
4080 * an LE connection attempt going on. Since controllers don't
4081 * support more than one connection attempt at the time, we
4082 * don't consider this an error case.
4083 */
4084 break;
4085 default:
4086 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4087 }
4088}
4089
/* Handle a single LE advertising report. During active scanning,
 * ADV_IND/ADV_SCAN_IND reports are cached so that they can later be
 * merged with the matching SCAN_RSP into one device-found event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		/* Connectable advertising may still satisfy a pending
		 * auto-connection.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, 0, 1,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, 0, 1, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, 0, 1, data, len,
			  d->last_adv_data, d->last_adv_data_len);
	clear_pending_adv_report(hdev);
}
4166
4167static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4168{
4169 u8 num_reports = skb->data[0];
4170 void *ptr = &skb->data[1];
4171
4172 hci_dev_lock(hdev);
4173
4174 while (num_reports--) {
4175 struct hci_ev_le_advertising_info *ev = ptr;
4176 s8 rssi;
4177
4178 rssi = ev->data[ev->length];
4179 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4180 ev->bdaddr_type, rssi, ev->data, ev->length);
4181
4182 ptr += sizeof(*ev) + ev->length + 1;
4183 }
4184
4185 hci_dev_unlock(hdev);
4186}
4187
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Look up the key matching the EDIV/Rand pair for our role on
	 * this connection (conn->out selects master vs slave keys).
	 */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level we can claim depends on whether the key
	 * stems from an authenticated (MITM protected) pairing.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == HCI_SMP_STK_SLAVE) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller to reject encryption */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4240
4241static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4242{
4243 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4244
4245 skb_pull(skb, sizeof(*le_ev));
4246
4247 switch (le_ev->subevent) {
4248 case HCI_EV_LE_CONN_COMPLETE:
4249 hci_le_conn_complete_evt(hdev, skb);
4250 break;
4251
4252 case HCI_EV_LE_ADVERTISING_REPORT:
4253 hci_le_adv_report_evt(hdev, skb);
4254 break;
4255
4256 case HCI_EV_LE_LTK_REQ:
4257 hci_le_ltk_request_evt(hdev, skb);
4258 break;
4259
4260 default:
4261 break;
4262 }
4263}
4264
4265static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4266{
4267 struct hci_ev_channel_selected *ev = (void *) skb->data;
4268 struct hci_conn *hcon;
4269
4270 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4271
4272 skb_pull(skb, sizeof(*ev));
4273
4274 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4275 if (!hcon)
4276 return;
4277
4278 amp_read_loc_assoc_final_data(hdev, hcon);
4279}
4280
/* Main HCI event demultiplexer: stashes the event for any pending
 * synchronous request, completes the sent command that was waiting for
 * this event, and dispatches to the matching per-event handler.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the outstanding command was waiting for exactly this event,
	 * mark its request as complete.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}