]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/hci_event.c
Bluetooth: Add clarifying comment to command status handling
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears HCI_INQUIRY and wakes any waiter on that bit,
 * moves discovery to DISCOVERY_STOPPED and kicks off connection
 * attempts that were deferred while the inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
63
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
65 {
66 __u8 status = *((__u8 *) skb->data);
67
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
69
70 if (status)
71 return;
72
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
74 }
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
86
87 hci_conn_check_pending(hdev);
88 }
89
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Intentionally only logs; no state is updated here — presumably the
 * outcome is handled via the Remote Name Request Complete event
 * elsewhere in this file (not visible in this chunk).
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
95
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
97 {
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
100
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
102
103 if (rp->status)
104 return;
105
106 hci_dev_lock(hdev);
107
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
109 if (conn)
110 conn->role = rp->role;
111
112 hci_dev_unlock(hdev);
113 }
114
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 {
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
119
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121
122 if (rp->status)
123 return;
124
125 hci_dev_lock(hdev);
126
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 if (conn)
129 conn->link_policy = __le16_to_cpu(rp->policy);
130
131 hci_dev_unlock(hdev);
132 }
133
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * On success, mirror the policy value that was sent into the matching
 * connection object.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Parameters of the command this reply completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips the 16-bit connection handle at the start of
		 * the command parameters; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
172 struct sk_buff *skb)
173 {
174 __u8 status = *((__u8 *) skb->data);
175 void *sent;
176
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
178
179 if (status)
180 return;
181
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
183 if (!sent)
184 return;
185
186 hdev->link_policy = get_unaligned_le16(sent);
187 }
188
/* Command Complete handler for HCI_Reset.
 *
 * HCI_RESET is cleared even on failure so further commands are not
 * blocked.  On success, all volatile host-side state is dropped since
 * the controller has just lost its copy too.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Clear unconditionally, regardless of the reset outcome */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* Cached TX power readings are stale after a reset */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data were wiped in the controller */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* Keep the host view of the LE white list in sync (reset is
	 * expected to clear the controller's list — per HCI spec).
	 */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
220
221 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
222 struct sk_buff *skb)
223 {
224 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
225 struct hci_cp_read_stored_link_key *sent;
226
227 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
228
229 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
230 if (!sent)
231 return;
232
233 if (!rp->status && sent->read_all == 0x01) {
234 hdev->stored_max_keys = rp->max_keys;
235 hdev->stored_num_keys = rp->num_keys;
236 }
237 }
238
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
240 struct sk_buff *skb)
241 {
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
245
246 if (rp->status)
247 return;
248
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
251 else
252 hdev->stored_num_keys = 0;
253 }
254
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is active the completion (including a
 * failure status) is forwarded to it; otherwise the name that was sent
 * is cached in hdev->dev_name on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Parameters of the command this event completes */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
275
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 {
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
279
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
281
282 if (rp->status)
283 return;
284
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
288 }
289
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success the HCI_AUTH device flag is synced to the value that was
 * written.  The result is always forwarded to the management core when
 * it is active, even on failure.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* The single command parameter is the auth enable value */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
317
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
319 {
320 __u8 status = *((__u8 *) skb->data);
321 __u8 param;
322 void *sent;
323
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
325
326 if (status)
327 return;
328
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
330 if (!sent)
331 return;
332
333 param = *((__u8 *) sent);
334
335 if (param)
336 set_bit(HCI_ENCRYPT, &hdev->flags);
337 else
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
339 }
340
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * On success, syncs HCI_ISCAN/HCI_PSCAN with the scan mode that was
 * written.  On failure the discoverable timeout is cleared instead,
 * since the requested mode never took effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* Scan mode bitmask that was requested */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
375
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
379
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381
382 if (rp->status)
383 return;
384
385 memcpy(hdev->dev_class, rp->dev_class, 3);
386
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
389 }
390
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success the class that was sent is cached in hdev->dev_class.
 * The outcome (including failure) is also reported to the management
 * core when it is active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
412
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
416 __u16 setting;
417
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
419
420 if (rp->status)
421 return;
422
423 setting = __le16_to_cpu(rp->voice_setting);
424
425 if (hdev->voice_setting == setting)
426 return;
427
428 hdev->voice_setting = setting;
429
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
431
432 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
434 }
435
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
437 struct sk_buff *skb)
438 {
439 __u8 status = *((__u8 *) skb->data);
440 __u16 setting;
441 void *sent;
442
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444
445 if (status)
446 return;
447
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
449 if (!sent)
450 return;
451
452 setting = get_unaligned_le16(sent);
453
454 if (hdev->voice_setting == setting)
455 return;
456
457 hdev->voice_setting = setting;
458
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
460
461 if (hdev->notify)
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
463 }
464
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 struct sk_buff *skb)
467 {
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
469
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471
472 if (rp->status)
473 return;
474
475 hdev->num_iac = rp->num_iac;
476
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
478 }
479
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success, the host-feature bit for SSP is synced with the mode
 * that was written.  When the management core is active it is always
 * told the outcome; otherwise the HCI_SSP_ENABLED flag is updated
 * directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
511
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success, the host-feature bit for Secure Connections is synced
 * with the value that was written.  The HCI_SC_ENABLED flag is only
 * updated here when the management core is NOT active (mgmt maintains
 * that flag itself).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
541
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
543 {
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
545
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
547
548 if (rp->status)
549 return;
550
551 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
552 hci_dev_test_flag(hdev, HCI_CONFIG)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
558 }
559 }
560
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
562 struct sk_buff *skb)
563 {
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
565
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
567
568 if (rp->status)
569 return;
570
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
574 }
575
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the LMP feature page 0 and derives the supported ACL packet
 * types and (e)SCO air modes from the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 packets (legacy synchronous links) */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO (2/3 Mbps, up to 3-slot) packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
625
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
627 struct sk_buff *skb)
628 {
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
630
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
632
633 if (rp->status)
634 return;
635
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
638
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
641 }
642
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 struct sk_buff *skb)
645 {
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 hdev->flow_ctl_mode = rp->mode;
654 }
655
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches the controller's ACL/SCO MTUs and buffer counts, and
 * initializes the free-packet counters used for flow control.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer values; override
	 * them when the quirk is set.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
681
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
683 {
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
685
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687
688 if (rp->status)
689 return;
690
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
693
694 if (hci_dev_test_flag(hdev, HCI_SETUP))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
696 }
697
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
699 struct sk_buff *skb)
700 {
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
702
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704
705 if (rp->status)
706 return;
707
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
711 }
712 }
713
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
715 struct sk_buff *skb)
716 {
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
719
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
721
722 if (status)
723 return;
724
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
726 if (!sent)
727 return;
728
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
731 }
732
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
745 }
746
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 struct sk_buff *skb)
749 {
750 u8 status = *((u8 *) skb->data);
751 u8 *type;
752
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
754
755 if (status)
756 return;
757
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 if (type)
760 hdev->page_scan_type = *type;
761 }
762
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
764 struct sk_buff *skb)
765 {
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
767
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
769
770 if (rp->status)
771 return;
772
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
776
777 hdev->block_cnt = hdev->num_blocks;
778
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
781 }
782
/* Command Complete handler for HCI_Read_Clock.
 *
 * The "which" parameter of the original command selects the clock:
 * 0x00 is the local clock (stored on hdev), anything else refers to a
 * piconet clock looked up via the returned connection handle.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
817
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * On success, caches the AMP controller parameters on hdev.  The A2MP
 * "get info" response is sent in both the success and failure paths,
 * so a waiting peer always gets an answer.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
842
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP assoc structure may arrive fragmented: while the remaining
 * length reported by the controller exceeds the bytes carried by this
 * event, the fragment is appended to hdev->loc_assoc and the next
 * fragment is requested.  Once the last fragment is in, the A2MP
 * responses are sent.
 *
 * NOTE(review): frag_len/rem_len come from the controller and are not
 * bounded against the size of assoc->data before the memcpy calls —
 * verify a misbehaving controller cannot overflow the buffer.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	/* Bytes of assoc data carried by this event */
	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Final fragment: complete the assoc and reset the offset */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
879
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
881 struct sk_buff *skb)
882 {
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
884
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
886
887 if (rp->status)
888 return;
889
890 hdev->inq_tx_power = rp->tx_power;
891 }
892
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * The result is always forwarded to the management core when it is
 * active; on success, the PIN length that was sent is recorded on the
 * matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
920
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
922 {
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
924
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926
927 hci_dev_lock(hdev);
928
929 if (hci_dev_test_flag(hdev, HCI_MGMT))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
931 rp->status);
932
933 hci_dev_unlock(hdev);
934 }
935
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
937 struct sk_buff *skb)
938 {
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
940
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
942
943 if (rp->status)
944 return;
945
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
948
949 hdev->le_cnt = hdev->le_pkts;
950
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
952 }
953
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
955 struct sk_buff *skb)
956 {
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
958
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
960
961 if (rp->status)
962 return;
963
964 memcpy(hdev->le_features, rp->features, 8);
965 }
966
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
968 struct sk_buff *skb)
969 {
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
971
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973
974 if (rp->status)
975 return;
976
977 hdev->adv_tx_power = rp->tx_power;
978 }
979
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
981 {
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
983
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985
986 hci_dev_lock(hdev);
987
988 if (hci_dev_test_flag(hdev, HCI_MGMT))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
990 rp->status);
991
992 hci_dev_unlock(hdev);
993 }
994
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
996 struct sk_buff *skb)
997 {
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
999
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001
1002 hci_dev_lock(hdev);
1003
1004 if (hci_dev_test_flag(hdev, HCI_MGMT))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1007
1008 hci_dev_unlock(hdev);
1009 }
1010
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1012 {
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1014
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016
1017 hci_dev_lock(hdev);
1018
1019 if (hci_dev_test_flag(hdev, HCI_MGMT))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1021 0, rp->status);
1022
1023 hci_dev_unlock(hdev);
1024 }
1025
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1028 {
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1030
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1032
1033 hci_dev_lock(hdev);
1034
1035 if (hci_dev_test_flag(hdev, HCI_MGMT))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1038
1039 hci_dev_unlock(hdev);
1040 }
1041
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1044 {
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1046
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048
1049 hci_dev_lock(hdev);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1051 rp->status);
1052 hci_dev_unlock(hdev);
1053 }
1054
1055 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1057 {
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1059
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061
1062 hci_dev_lock(hdev);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1065 rp->status);
1066 hci_dev_unlock(hdev);
1067 }
1068
1069 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1070 {
1071 __u8 status = *((__u8 *) skb->data);
1072 bdaddr_t *sent;
1073
1074 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1075
1076 if (status)
1077 return;
1078
1079 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1080 if (!sent)
1081 return;
1082
1083 hci_dev_lock(hdev);
1084
1085 bacpy(&hdev->random_addr, sent);
1086
1087 hci_dev_unlock(hdev);
1088 }
1089
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * On success, syncs the HCI_LE_ADV flag with the enable value that
 * was sent.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* An LE link in BT_CONNECT means we advertise waiting
		 * for the peer to connect; arm its timeout.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1124
1125 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1126 {
1127 struct hci_cp_le_set_scan_param *cp;
1128 __u8 status = *((__u8 *) skb->data);
1129
1130 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1131
1132 if (status)
1133 return;
1134
1135 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1136 if (!cp)
1137 return;
1138
1139 hci_dev_lock(hdev);
1140
1141 hdev->le_scan_type = cp->type;
1142
1143 hci_dev_unlock(hdev);
1144 }
1145
1146 static bool has_pending_adv_report(struct hci_dev *hdev)
1147 {
1148 struct discovery_state *d = &hdev->discovery;
1149
1150 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1151 }
1152
1153 static void clear_pending_adv_report(struct hci_dev *hdev)
1154 {
1155 struct discovery_state *d = &hdev->discovery;
1156
1157 bacpy(&d->last_adv_addr, BDADDR_ANY);
1158 d->last_adv_data_len = 0;
1159 }
1160
1161 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1162 u8 bdaddr_type, s8 rssi, u32 flags,
1163 u8 *data, u8 len)
1164 {
1165 struct discovery_state *d = &hdev->discovery;
1166
1167 bacpy(&d->last_adv_addr, bdaddr);
1168 d->last_adv_addr_type = bdaddr_type;
1169 d->last_adv_rssi = rssi;
1170 d->last_adv_flags = flags;
1171 memcpy(d->last_adv_data, data, len);
1172 d->last_adv_data_len = len;
1173 }
1174
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * On success, syncs the HCI_LE_SCAN flag with the enable value that
 * was sent.  Disabling also flushes any buffered advertising report
 * and sorts out discovery/advertising state.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scans buffer reports awaiting a scan response;
		 * start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			/* Flush the buffered report that never got its
			 * matching scan response.
			 */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1243
1244 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1245 struct sk_buff *skb)
1246 {
1247 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1248
1249 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1250
1251 if (rp->status)
1252 return;
1253
1254 hdev->le_white_list_size = rp->size;
1255 }
1256
1257 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1258 struct sk_buff *skb)
1259 {
1260 __u8 status = *((__u8 *) skb->data);
1261
1262 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1263
1264 if (status)
1265 return;
1266
1267 hci_bdaddr_list_clear(&hdev->le_white_list);
1268 }
1269
1270 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1271 struct sk_buff *skb)
1272 {
1273 struct hci_cp_le_add_to_white_list *sent;
1274 __u8 status = *((__u8 *) skb->data);
1275
1276 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1277
1278 if (status)
1279 return;
1280
1281 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1282 if (!sent)
1283 return;
1284
1285 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1286 sent->bdaddr_type);
1287 }
1288
1289 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1290 struct sk_buff *skb)
1291 {
1292 struct hci_cp_le_del_from_white_list *sent;
1293 __u8 status = *((__u8 *) skb->data);
1294
1295 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1296
1297 if (status)
1298 return;
1299
1300 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1301 if (!sent)
1302 return;
1303
1304 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1305 sent->bdaddr_type);
1306 }
1307
1308 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1309 struct sk_buff *skb)
1310 {
1311 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1312
1313 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1314
1315 if (rp->status)
1316 return;
1317
1318 memcpy(hdev->le_states, rp->le_states, 8);
1319 }
1320
1321 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1322 struct sk_buff *skb)
1323 {
1324 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1325
1326 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1327
1328 if (rp->status)
1329 return;
1330
1331 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1332 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1333 }
1334
1335 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1336 struct sk_buff *skb)
1337 {
1338 struct hci_cp_le_write_def_data_len *sent;
1339 __u8 status = *((__u8 *) skb->data);
1340
1341 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1342
1343 if (status)
1344 return;
1345
1346 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1347 if (!sent)
1348 return;
1349
1350 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1351 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1352 }
1353
1354 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1355 struct sk_buff *skb)
1356 {
1357 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1358
1359 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1360
1361 if (rp->status)
1362 return;
1363
1364 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1365 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1366 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1367 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1368 }
1369
/* Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Mirror the LE Supported (Host) and Simultaneous LE+BR/EDR (Host)
 * bits that were just written into our cached host feature page and
 * the corresponding hdev flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Look at what we actually asked the controller to set */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* With LE disabled, advertising cannot stay on either */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1403
1404 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1405 {
1406 struct hci_cp_le_set_adv_param *cp;
1407 u8 status = *((u8 *) skb->data);
1408
1409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1410
1411 if (status)
1412 return;
1413
1414 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1415 if (!cp)
1416 return;
1417
1418 hci_dev_lock(hdev);
1419 hdev->adv_addr_type = cp->own_address_type;
1420 hci_dev_unlock(hdev);
1421 }
1422
1423 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1424 struct sk_buff *skb)
1425 {
1426 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1427
1428 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1429 hdev->name, rp->status, rp->phy_handle);
1430
1431 if (rp->status)
1432 return;
1433
1434 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1435 }
1436
1437 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1438 {
1439 struct hci_rp_read_rssi *rp = (void *) skb->data;
1440 struct hci_conn *conn;
1441
1442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1443
1444 if (rp->status)
1445 return;
1446
1447 hci_dev_lock(hdev);
1448
1449 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1450 if (conn)
1451 conn->rssi = rp->rssi;
1452
1453 hci_dev_unlock(hdev);
1454 }
1455
1456 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1457 {
1458 struct hci_cp_read_tx_power *sent;
1459 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1460 struct hci_conn *conn;
1461
1462 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1463
1464 if (rp->status)
1465 return;
1466
1467 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1468 if (!sent)
1469 return;
1470
1471 hci_dev_lock(hdev);
1472
1473 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1474 if (!conn)
1475 goto unlock;
1476
1477 switch (sent->type) {
1478 case 0x00:
1479 conn->tx_power = rp->tx_power;
1480 break;
1481 case 0x01:
1482 conn->max_tx_power = rp->tx_power;
1483 break;
1484 }
1485
1486 unlock:
1487 hci_dev_unlock(hdev);
1488 }
1489
1490 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1491 {
1492 u8 status = *((u8 *) skb->data);
1493 u8 *mode;
1494
1495 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1496
1497 if (status)
1498 return;
1499
1500 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1501 if (mode)
1502 hdev->ssp_debug_mode = *mode;
1503 }
1504
1505 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1506 {
1507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1508
1509 if (status) {
1510 hci_conn_check_pending(hdev);
1511 return;
1512 }
1513
1514 set_bit(HCI_INQUIRY, &hdev->flags);
1515 }
1516
/* Command Status for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or schedule a retry of) the connection object
 * created when the command was issued; on success, make sure a conn
 * object exists so the later Connection Complete event has something
 * to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (HCI "Command Disallowed") with at
			 * most two attempts so far is treated as
			 * retryable: park the conn in BT_CONNECT2 instead
			 * of deleting it. NOTE(review): retry semantics
			 * inferred from conn->attempt; confirm against
			 * the code path that re-issues the connect.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* No object yet (e.g. request not initiated through a
		 * path that pre-creates one) - create it now.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1554
1555 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1556 {
1557 struct hci_cp_add_sco *cp;
1558 struct hci_conn *acl, *sco;
1559 __u16 handle;
1560
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1562
1563 if (!status)
1564 return;
1565
1566 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1567 if (!cp)
1568 return;
1569
1570 handle = __le16_to_cpu(cp->handle);
1571
1572 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1573
1574 hci_dev_lock(hdev);
1575
1576 acl = hci_conn_hash_lookup_handle(hdev, handle);
1577 if (acl) {
1578 sco = acl->link;
1579 if (sco) {
1580 sco->state = BT_CLOSED;
1581
1582 hci_connect_cfm(sco, status);
1583 hci_conn_del(sco);
1584 }
1585 }
1586
1587 hci_dev_unlock(hdev);
1588 }
1589
1590 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1591 {
1592 struct hci_cp_auth_requested *cp;
1593 struct hci_conn *conn;
1594
1595 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1596
1597 if (!status)
1598 return;
1599
1600 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1601 if (!cp)
1602 return;
1603
1604 hci_dev_lock(hdev);
1605
1606 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1607 if (conn) {
1608 if (conn->state == BT_CONFIG) {
1609 hci_connect_cfm(conn, status);
1610 hci_conn_drop(conn);
1611 }
1612 }
1613
1614 hci_dev_unlock(hdev);
1615 }
1616
1617 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1618 {
1619 struct hci_cp_set_conn_encrypt *cp;
1620 struct hci_conn *conn;
1621
1622 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1623
1624 if (!status)
1625 return;
1626
1627 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1628 if (!cp)
1629 return;
1630
1631 hci_dev_lock(hdev);
1632
1633 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1634 if (conn) {
1635 if (conn->state == BT_CONFIG) {
1636 hci_connect_cfm(conn, status);
1637 hci_conn_drop(conn);
1638 }
1639 }
1640
1641 hci_dev_unlock(hdev);
1642 }
1643
1644 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1645 struct hci_conn *conn)
1646 {
1647 if (conn->state != BT_CONFIG || !conn->out)
1648 return 0;
1649
1650 if (conn->pending_sec_level == BT_SECURITY_SDP)
1651 return 0;
1652
1653 /* Only request authentication for SSP connections or non-SSP
1654 * devices with sec_level MEDIUM or HIGH or if MITM protection
1655 * is requested.
1656 */
1657 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1658 conn->pending_sec_level != BT_SECURITY_FIPS &&
1659 conn->pending_sec_level != BT_SECURITY_HIGH &&
1660 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1661 return 0;
1662
1663 return 1;
1664 }
1665
1666 static int hci_resolve_name(struct hci_dev *hdev,
1667 struct inquiry_entry *e)
1668 {
1669 struct hci_cp_remote_name_req cp;
1670
1671 memset(&cp, 0, sizeof(cp));
1672
1673 bacpy(&cp.bdaddr, &e->data.bdaddr);
1674 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1675 cp.pscan_mode = e->data.pscan_mode;
1676 cp.clock_offset = e->data.clock_offset;
1677
1678 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1679 }
1680
1681 static bool hci_resolve_next_name(struct hci_dev *hdev)
1682 {
1683 struct discovery_state *discov = &hdev->discovery;
1684 struct inquiry_entry *e;
1685
1686 if (list_empty(&discov->resolve))
1687 return false;
1688
1689 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1690 if (!e)
1691 return false;
1692
1693 if (hci_resolve_name(hdev, e) == 0) {
1694 e->name_state = NAME_PENDING;
1695 return true;
1696 }
1697
1698 return false;
1699 }
1700
/* Process the outcome of a remote name request during discovery.
 *
 * @conn:     existing connection to the device, or NULL
 * @bdaddr:   address the name request was sent to
 * @name:     resolved name, or NULL when resolution failed
 * @name_len: length of @name
 *
 * Reports the device as connected to mgmt when appropriate, forwards
 * the resolved name, then either continues resolving further pending
 * names or marks discovery as stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested - finish discovery right away */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending? Then discovery keeps running */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1749
/* Command Status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failed request is processed here: notify the discovery
 * machinery that the name could not be obtained and, if an outgoing
 * connection was waiting on the name request, kick off authentication
 * now instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (name == NULL) to discovery */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only one outstanding auth request per connection */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1792
1793 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1794 {
1795 struct hci_cp_read_remote_features *cp;
1796 struct hci_conn *conn;
1797
1798 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1799
1800 if (!status)
1801 return;
1802
1803 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1804 if (!cp)
1805 return;
1806
1807 hci_dev_lock(hdev);
1808
1809 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1810 if (conn) {
1811 if (conn->state == BT_CONFIG) {
1812 hci_connect_cfm(conn, status);
1813 hci_conn_drop(conn);
1814 }
1815 }
1816
1817 hci_dev_unlock(hdev);
1818 }
1819
1820 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1821 {
1822 struct hci_cp_read_remote_ext_features *cp;
1823 struct hci_conn *conn;
1824
1825 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1826
1827 if (!status)
1828 return;
1829
1830 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1831 if (!cp)
1832 return;
1833
1834 hci_dev_lock(hdev);
1835
1836 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1837 if (conn) {
1838 if (conn->state == BT_CONFIG) {
1839 hci_connect_cfm(conn, status);
1840 hci_conn_drop(conn);
1841 }
1842 }
1843
1844 hci_dev_unlock(hdev);
1845 }
1846
1847 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1848 {
1849 struct hci_cp_setup_sync_conn *cp;
1850 struct hci_conn *acl, *sco;
1851 __u16 handle;
1852
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854
1855 if (!status)
1856 return;
1857
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1859 if (!cp)
1860 return;
1861
1862 handle = __le16_to_cpu(cp->handle);
1863
1864 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1865
1866 hci_dev_lock(hdev);
1867
1868 acl = hci_conn_hash_lookup_handle(hdev, handle);
1869 if (acl) {
1870 sco = acl->link;
1871 if (sco) {
1872 sco->state = BT_CLOSED;
1873
1874 hci_connect_cfm(sco, status);
1875 hci_conn_del(sco);
1876 }
1877 }
1878
1879 hci_dev_unlock(hdev);
1880 }
1881
1882 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1883 {
1884 struct hci_cp_sniff_mode *cp;
1885 struct hci_conn *conn;
1886
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1888
1889 if (!status)
1890 return;
1891
1892 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1893 if (!cp)
1894 return;
1895
1896 hci_dev_lock(hdev);
1897
1898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1899 if (conn) {
1900 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1901
1902 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1903 hci_sco_setup(conn, status);
1904 }
1905
1906 hci_dev_unlock(hdev);
1907 }
1908
1909 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1910 {
1911 struct hci_cp_exit_sniff_mode *cp;
1912 struct hci_conn *conn;
1913
1914 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1915
1916 if (!status)
1917 return;
1918
1919 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1920 if (!cp)
1921 return;
1922
1923 hci_dev_lock(hdev);
1924
1925 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1926 if (conn) {
1927 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1928
1929 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1930 hci_sco_setup(conn, status);
1931 }
1932
1933 hci_dev_unlock(hdev);
1934 }
1935
1936 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1937 {
1938 struct hci_cp_disconnect *cp;
1939 struct hci_conn *conn;
1940
1941 if (!status)
1942 return;
1943
1944 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1945 if (!cp)
1946 return;
1947
1948 hci_dev_lock(hdev);
1949
1950 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1951 if (conn)
1952 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1953 conn->dst_type, status);
1954
1955 hci_dev_unlock(hdev);
1956 }
1957
1958 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1959 {
1960 struct hci_cp_create_phy_link *cp;
1961
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1963
1964 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1965 if (!cp)
1966 return;
1967
1968 hci_dev_lock(hdev);
1969
1970 if (status) {
1971 struct hci_conn *hcon;
1972
1973 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1974 if (hcon)
1975 hci_conn_del(hcon);
1976 } else {
1977 amp_write_remote_assoc(hdev, cp->phy_handle);
1978 }
1979
1980 hci_dev_unlock(hdev);
1981 }
1982
1983 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1984 {
1985 struct hci_cp_accept_phy_link *cp;
1986
1987 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1988
1989 if (status)
1990 return;
1991
1992 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1993 if (!cp)
1994 return;
1995
1996 amp_write_remote_assoc(hdev, cp->phy_handle);
1997 }
1998
/* Command Status for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information on
 * the conn object and arm a connection-attempt timeout when not
 * connecting via the white list.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
2049
/* Command Status for HCI_OP_LE_START_ENC.
 *
 * Only failure needs handling: if the controller rejected starting
 * encryption on a live connection, disconnect it with an
 * authentication failure rather than leave the link unencrypted.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Connection may already be closing; nothing to tear down then */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2079
2080 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2081 {
2082 struct hci_cp_switch_role *cp;
2083 struct hci_conn *conn;
2084
2085 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2086
2087 if (!status)
2088 return;
2089
2090 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2091 if (!cp)
2092 return;
2093
2094 hci_dev_lock(hdev);
2095
2096 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2097 if (conn)
2098 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2099
2100 hci_dev_unlock(hdev);
2101 }
2102
/* HCI Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag, wakes anyone waiting on it and - for
 * mgmt-controlled devices - either moves on to resolving names of
 * discovered devices or marks discovery as finished.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* A connect request may have been deferred while inquiry ran */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state is only tracked for mgmt-controlled devices */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first device needing it */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2161
/* HCI Inquiry Result event: a batch of discovered devices (no RSSI).
 *
 * Each entry is copied into the inquiry cache and forwarded to mgmt
 * as a Device Found event with HCI_RSSI_INVALID.
 *
 * NOTE(review): num_rsp and the trailing info entries come straight
 * from the controller and are not checked against skb->len in this
 * function - confirm the event length is validated before dispatch.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP info */
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2199
/* HCI Connection Complete event.
 *
 * Finalizes an ACL/SCO connection attempt: on success the conn object
 * is moved to BT_CONFIG/BT_CONNECTED and follow-up commands are sent;
 * on failure the object is torn down and mgmt is notified.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* The request may have gone out as eSCO; retry the
		 * lookup under ESCO_LINK and downgrade the object.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Unpaired legacy (non-SSP) peers get the longer
			 * pairing timeout so pairing can complete.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure, confirm and delete; on success, only non-ACL links
	 * get their connect confirmation here - presumably ACL waits for
	 * the remote feature exchange to finish (handled elsewhere).
	 */
	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2284
2285 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2286 {
2287 struct hci_cp_reject_conn_req cp;
2288
2289 bacpy(&cp.bdaddr, bdaddr);
2290 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2291 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2292 }
2293
/* HCI Connection Request event.
 *
 * Decide whether to accept or reject an incoming ACL/SCO/eSCO
 * connection based on link policy, the blacklist and (for mgmt
 * devices) connectable state / whitelist, then either answer the
 * request immediately or defer it to the upper layers.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class if we saw this device before */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: upper layer will decide later */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2388
2389 static u8 hci_to_mgmt_reason(u8 err)
2390 {
2391 switch (err) {
2392 case HCI_ERROR_CONNECTION_TIMEOUT:
2393 return MGMT_DEV_DISCONN_TIMEOUT;
2394 case HCI_ERROR_REMOTE_USER_TERM:
2395 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2396 case HCI_ERROR_REMOTE_POWER_OFF:
2397 return MGMT_DEV_DISCONN_REMOTE;
2398 case HCI_ERROR_LOCAL_HOST_TERM:
2399 return MGMT_DEV_DISCONN_LOCAL_HOST;
2400 default:
2401 return MGMT_DEV_DISCONN_UNKNOWN;
2402 }
2403 }
2404
/* Handle the HCI Disconnection Complete event: mark the connection closed,
 * notify the MGMT layer, drop non-persistent link keys, re-queue auto-connect
 * devices for background scanning and finally free the hci_conn object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		/* Controller reported failure; the link is still up. */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		/* Remove a link key that was flagged non-persistent. */
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect automatically after a link loss. */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			/* Move the device onto the pending LE connection
			 * list and kick background scanning so it gets
			 * reconnected.
			 */
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del() below, so save the link type
	 * before the deletion for the advertising check that follows.
	 */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2482
/* Handle the HCI Authentication Complete event: update the connection's
 * security state, notify MGMT on failure, and kick off encryption where
 * the connection setup or a pending request demands it.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			/* Legacy (non-SSP) links cannot be re-authenticated;
			 * keep the existing security state unchanged.
			 */
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP connection setup: continue with encryption
			 * before declaring the link connected.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timeout on the established link. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			/* A pending encryption request can proceed now that
			 * authentication succeeded.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2547
/* Handle the HCI Remote Name Request Complete event: forward the resolved
 * name (or failure) to the MGMT name-resolution logic, then start
 * authentication on the connection if outgoing security requires it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	/* conn may legitimately be NULL here; hci_check_pending_name()
	 * below accepts a NULL connection.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2589
/* Handle the HCI Encryption Change event: update the connection's
 * encryption/authentication flags, enforce Secure Connections Only policy,
 * and confirm the state change to the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		/* Encryption failed on an established link: drop it. */
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2661
2662 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2663 struct sk_buff *skb)
2664 {
2665 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2666 struct hci_conn *conn;
2667
2668 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2669
2670 hci_dev_lock(hdev);
2671
2672 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2673 if (conn) {
2674 if (!ev->status)
2675 set_bit(HCI_CONN_SECURE, &conn->flags);
2676
2677 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2678
2679 hci_key_change_cfm(conn, ev->status);
2680 }
2681
2682 hci_dev_unlock(hdev);
2683 }
2684
/* Handle the HCI Read Remote Supported Features Complete event during
 * connection setup: store the feature page and continue with extended
 * features, remote name resolution or authentication as appropriate.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being set up.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		/* Both sides support extended features; fetch page 1 next. */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before announcing the device. */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2733
/* Handle the HCI Command Complete event: dispatch to the per-opcode
 * hci_cc_* handler, refresh the command credit count reported by the
 * controller, flag the originating request as complete and restart the
 * command queue if more work is pending.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* The first return parameter after the event header is the status
	 * byte for all the commands handled below.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; stop the command timeout timer. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* The controller reported it can accept more commands, unless a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Kick the TX path if commands are queued and credits remain. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3037
/* Handle the HCI Command Status event: dispatch to the per-opcode hci_cs_*
 * handler, refresh the command credit count and decide whether the
 * originating request should be flagged complete.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; stop the command timeout timer. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* The controller reported it can accept more commands, unless a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Kick the TX path if commands are queued and credits remain. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3140
3141 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3142 {
3143 struct hci_ev_hardware_error *ev = (void *) skb->data;
3144
3145 hdev->hw_error_code = ev->code;
3146
3147 queue_work(hdev->req_workqueue, &hdev->error_reset);
3148 }
3149
3150 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3151 {
3152 struct hci_ev_role_change *ev = (void *) skb->data;
3153 struct hci_conn *conn;
3154
3155 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3156
3157 hci_dev_lock(hdev);
3158
3159 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3160 if (conn) {
3161 if (!ev->status)
3162 conn->role = ev->role;
3163
3164 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3165
3166 hci_role_switch_cfm(conn, ev->status, ev->role);
3167 }
3168
3169 hci_dev_unlock(hdev);
3170 }
3171
/* Handle the HCI Number Of Completed Packets event: return per-link
 * transmit credits to the appropriate pool (ACL, LE or SCO) and kick the
 * TX worker so queued traffic can flow again.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the event actually contains num_hndl handle entries. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credits are clamped to the controller-advertised maximum
		 * for each link type.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL credits.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3237
3238 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3239 __u16 handle)
3240 {
3241 struct hci_chan *chan;
3242
3243 switch (hdev->dev_type) {
3244 case HCI_BREDR:
3245 return hci_conn_hash_lookup_handle(hdev, handle);
3246 case HCI_AMP:
3247 chan = hci_chan_lookup_handle(hdev, handle);
3248 if (chan)
3249 return chan->conn;
3250 break;
3251 default:
3252 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3253 break;
3254 }
3255
3256 return NULL;
3257 }
3258
/* Handle the HCI Number Of Completed Data Blocks event (block-based flow
 * control): return per-link block credits to the shared pool and kick the
 * TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the event actually contains num_hndl handle entries. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credits are clamped to the controller-advertised
			 * maximum number of blocks.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3308
3309 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3310 {
3311 struct hci_ev_mode_change *ev = (void *) skb->data;
3312 struct hci_conn *conn;
3313
3314 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3315
3316 hci_dev_lock(hdev);
3317
3318 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3319 if (conn) {
3320 conn->mode = ev->mode;
3321
3322 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3323 &conn->flags)) {
3324 if (conn->mode == HCI_CM_ACTIVE)
3325 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3326 else
3327 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3328 }
3329
3330 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3331 hci_sco_setup(conn, ev->status);
3332 }
3333
3334 hci_dev_unlock(hdev);
3335 }
3336
/* Handle the HCI PIN Code Request event: reject the pairing when the
 * device is not bondable and the remote initiated it, otherwise forward
 * the request to user space via MGMT.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing is ongoing. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		/* Remote-initiated pairing on a non-bondable device is
		 * rejected outright.
		 */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3374
3375 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3376 {
3377 if (key_type == HCI_LK_CHANGED_COMBINATION)
3378 return;
3379
3380 conn->pin_length = pin_len;
3381 conn->key_type = key_type;
3382
3383 switch (key_type) {
3384 case HCI_LK_LOCAL_UNIT:
3385 case HCI_LK_REMOTE_UNIT:
3386 case HCI_LK_DEBUG_COMBINATION:
3387 return;
3388 case HCI_LK_COMBINATION:
3389 if (pin_len == 16)
3390 conn->pending_sec_level = BT_SECURITY_HIGH;
3391 else
3392 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3393 break;
3394 case HCI_LK_UNAUTH_COMBINATION_P192:
3395 case HCI_LK_UNAUTH_COMBINATION_P256:
3396 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3397 break;
3398 case HCI_LK_AUTH_COMBINATION_P192:
3399 conn->pending_sec_level = BT_SECURITY_HIGH;
3400 break;
3401 case HCI_LK_AUTH_COMBINATION_P256:
3402 conn->pending_sec_level = BT_SECURITY_FIPS;
3403 break;
3404 }
3405 }
3406
/* Handle the HCI Link Key Request event: look up a stored key for the
 * peer and reply with it if it satisfies the security requirements of
 * the connection, otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key must not be used when the
		 * connection requested MITM protection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy a
		 * high/FIPS pending security level.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3466
/* Handle the HCI Link Key Notification event: store the new key, update
 * the connection's security state and notify user space, discarding debug
 * keys unless the device is configured to keep them.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Re-arm the disconnect timeout on the link. */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* A non-persistent key is flushed when the connection goes down. */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3526
3527 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3528 {
3529 struct hci_ev_clock_offset *ev = (void *) skb->data;
3530 struct hci_conn *conn;
3531
3532 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3533
3534 hci_dev_lock(hdev);
3535
3536 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3537 if (conn && !ev->status) {
3538 struct inquiry_entry *ie;
3539
3540 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3541 if (ie) {
3542 ie->data.clock_offset = ev->clock_offset;
3543 ie->timestamp = jiffies;
3544 }
3545 }
3546
3547 hci_dev_unlock(hdev);
3548 }
3549
3550 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3551 {
3552 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3553 struct hci_conn *conn;
3554
3555 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3556
3557 hci_dev_lock(hdev);
3558
3559 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3560 if (conn && !ev->status)
3561 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3562
3563 hci_dev_unlock(hdev);
3564 }
3565
3566 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3567 {
3568 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3569 struct inquiry_entry *ie;
3570
3571 BT_DBG("%s", hdev->name);
3572
3573 hci_dev_lock(hdev);
3574
3575 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3576 if (ie) {
3577 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3578 ie->timestamp = jiffies;
3579 }
3580
3581 hci_dev_unlock(hdev);
3582 }
3583
/* Handle the HCI Inquiry Result with RSSI event: parse each response
 * (either of the two on-air formats), update the inquiry cache and report
 * the discovered devices to MGMT.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte of the event is the number of responses. */
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two response formats by the per-entry size:
	 * anything other than inquiry_info_with_rssi implies the variant
	 * that additionally carries the page scan mode.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3647
/* HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection, derives the
 * remote host's SSP/SC support from page 1, and continues connection
 * setup (remote name request or mgmt connected notification) when the
 * connection is still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* NOTE(review): the features are copied even when ev->status is
	 * non-zero — presumably harmless since the page is then unused,
	 * but worth confirming against newer kernels.
	 */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote *host* features (SSP/SC support) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Only continue setup while the connection is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before reporting the device as
		 * connected to the management interface.
		 */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3711
/* HCI Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO connection setup. For a set of retryable error
 * codes on an outgoing connection, the setup is retried with a
 * restricted packet type mask before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have completed as plain SCO; retry
		 * the lookup with the eSCO link type and downgrade the
		 * connection object if found.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a reduced packet type selection; if a
			 * retry was issued, keep the connection alive and
			 * wait for the next completion event.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3770
3771 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3772 {
3773 size_t parsed = 0;
3774
3775 while (parsed < eir_len) {
3776 u8 field_len = eir[0];
3777
3778 if (field_len == 0)
3779 return parsed;
3780
3781 parsed += field_len + 1;
3782 eir += field_len + 1;
3783 }
3784
3785 return eir_len;
3786 }
3787
3788 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3789 struct sk_buff *skb)
3790 {
3791 struct inquiry_data data;
3792 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3793 int num_rsp = *((__u8 *) skb->data);
3794 size_t eir_len;
3795
3796 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3797
3798 if (!num_rsp)
3799 return;
3800
3801 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3802 return;
3803
3804 hci_dev_lock(hdev);
3805
3806 for (; num_rsp; num_rsp--, info++) {
3807 u32 flags;
3808 bool name_known;
3809
3810 bacpy(&data.bdaddr, &info->bdaddr);
3811 data.pscan_rep_mode = info->pscan_rep_mode;
3812 data.pscan_period_mode = info->pscan_period_mode;
3813 data.pscan_mode = 0x00;
3814 memcpy(data.dev_class, info->dev_class, 3);
3815 data.clock_offset = info->clock_offset;
3816 data.rssi = info->rssi;
3817 data.ssp_mode = 0x01;
3818
3819 if (hci_dev_test_flag(hdev, HCI_MGMT))
3820 name_known = eir_has_data_type(info->data,
3821 sizeof(info->data),
3822 EIR_NAME_COMPLETE);
3823 else
3824 name_known = true;
3825
3826 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3827
3828 eir_len = eir_get_length(info->data, sizeof(info->data));
3829
3830 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3831 info->dev_class, info->rssi,
3832 flags, info->data, eir_len, NULL, 0);
3833 }
3834
3835 hci_dev_unlock(hdev);
3836 }
3837
/* HCI Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links; on failure of an established
 * connection the link is disconnected, otherwise the pending security
 * level is applied and connect/auth confirmation is delivered.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established connection terminates it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3887
3888 static u8 hci_get_auth_req(struct hci_conn *conn)
3889 {
3890 /* If remote requests no-bonding follow that lead */
3891 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3892 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3893 return conn->remote_auth | (conn->auth_type & 0x01);
3894
3895 /* If both remote and local have enough IO capabilities, require
3896 * MITM protection
3897 */
3898 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3899 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3900 return conn->remote_auth | 0x01;
3901
3902 /* No MITM protection possible so ignore remote requirement */
3903 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3904 }
3905
3906 static u8 bredr_oob_data_present(struct hci_conn *conn)
3907 {
3908 struct hci_dev *hdev = conn->hdev;
3909 struct oob_data *data;
3910
3911 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3912 if (!data)
3913 return 0x00;
3914
3915 if (bredr_sc_enabled(hdev)) {
3916 /* When Secure Connections is enabled, then just
3917 * return the present value stored with the OOB
3918 * data. The stored value contains the right present
3919 * information. However it can only be trusted when
3920 * not in Secure Connection Only mode.
3921 */
3922 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
3923 return data->present;
3924
3925 /* When Secure Connections Only mode is enabled, then
3926 * the P-256 values are required. If they are not
3927 * available, then do not declare that OOB data is
3928 * present.
3929 */
3930 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3931 !memcmp(data->hash256, ZERO_KEY, 16))
3932 return 0x00;
3933
3934 return 0x02;
3935 }
3936
3937 /* When Secure Connections is not enabled or actually
3938 * not supported by the hardware, then check that if
3939 * P-192 data values are present.
3940 */
3941 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3942 !memcmp(data->hash192, ZERO_KEY, 16))
3943 return 0x00;
3944
3945 return 0x01;
3946 }
3947
/* HCI IO Capability Request event.
 *
 * Decides whether the pairing attempt is allowed and answers with
 * either an IO Capability Reply (capability, auth requirements, OOB
 * presence) or a negative reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection for the duration of the pairing exchange */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Pairing not allowed: reject the request */
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4016
4017 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4018 {
4019 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4020 struct hci_conn *conn;
4021
4022 BT_DBG("%s", hdev->name);
4023
4024 hci_dev_lock(hdev);
4025
4026 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4027 if (!conn)
4028 goto unlock;
4029
4030 conn->remote_cap = ev->capability;
4031 conn->remote_auth = ev->authentication;
4032
4033 unlock:
4034 hci_dev_unlock(hdev);
4035 }
4036
/* HCI User Confirmation Request event.
 *
 * Implements the numeric comparison acceptance policy: reject when
 * MITM is required but the remote cannot provide it, auto-accept
 * (optionally delayed) when neither side needs MITM, and otherwise
 * forward the request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM protection flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally defer the auto-accept to give the remote
		 * side a chance to cancel first.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4111
4112 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4113 struct sk_buff *skb)
4114 {
4115 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4116
4117 BT_DBG("%s", hdev->name);
4118
4119 if (hci_dev_test_flag(hdev, HCI_MGMT))
4120 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4121 }
4122
4123 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4124 struct sk_buff *skb)
4125 {
4126 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4127 struct hci_conn *conn;
4128
4129 BT_DBG("%s", hdev->name);
4130
4131 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4132 if (!conn)
4133 return;
4134
4135 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4136 conn->passkey_entered = 0;
4137
4138 if (hci_dev_test_flag(hdev, HCI_MGMT))
4139 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4140 conn->dst_type, conn->passkey_notify,
4141 conn->passkey_entered);
4142 }
4143
4144 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4145 {
4146 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4147 struct hci_conn *conn;
4148
4149 BT_DBG("%s", hdev->name);
4150
4151 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4152 if (!conn)
4153 return;
4154
4155 switch (ev->type) {
4156 case HCI_KEYPRESS_STARTED:
4157 conn->passkey_entered = 0;
4158 return;
4159
4160 case HCI_KEYPRESS_ENTERED:
4161 conn->passkey_entered++;
4162 break;
4163
4164 case HCI_KEYPRESS_ERASED:
4165 conn->passkey_entered--;
4166 break;
4167
4168 case HCI_KEYPRESS_CLEARED:
4169 conn->passkey_entered = 0;
4170 break;
4171
4172 case HCI_KEYPRESS_COMPLETED:
4173 return;
4174 }
4175
4176 if (hci_dev_test_flag(hdev, HCI_MGMT))
4177 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4178 conn->dst_type, conn->passkey_notify,
4179 conn->passkey_entered);
4180 }
4181
/* HCI Simple Pairing Complete event.
 *
 * Resets the cached remote auth requirement and, for failed pairings
 * that we did not initiate, reports the failure to user space.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Drop the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4212
4213 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4214 struct sk_buff *skb)
4215 {
4216 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4217 struct inquiry_entry *ie;
4218 struct hci_conn *conn;
4219
4220 BT_DBG("%s", hdev->name);
4221
4222 hci_dev_lock(hdev);
4223
4224 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4225 if (conn)
4226 memcpy(conn->features[1], ev->features, 8);
4227
4228 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4229 if (ie)
4230 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4231
4232 hci_dev_unlock(hdev);
4233 }
4234
/* HCI Remote OOB Data Request event.
 *
 * Answers with the stored OOB hash/randomizer values for the peer, in
 * the extended (P-192 + P-256) format when Secure Connections is
 * enabled, or with a negative reply when no data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		/* No stored OOB data for this peer: reject the request */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the P-192 values must not be used, so
		 * send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		/* Legacy reply carries only the P-192 values */
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4288
4289 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4290 struct sk_buff *skb)
4291 {
4292 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4293 struct hci_conn *hcon, *bredr_hcon;
4294
4295 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4296 ev->status);
4297
4298 hci_dev_lock(hdev);
4299
4300 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4301 if (!hcon) {
4302 hci_dev_unlock(hdev);
4303 return;
4304 }
4305
4306 if (ev->status) {
4307 hci_conn_del(hcon);
4308 hci_dev_unlock(hdev);
4309 return;
4310 }
4311
4312 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4313
4314 hcon->state = BT_CONNECTED;
4315 bacpy(&hcon->dst, &bredr_hcon->dst);
4316
4317 hci_conn_hold(hcon);
4318 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4319 hci_conn_drop(hcon);
4320
4321 hci_debugfs_create_conn(hcon);
4322 hci_conn_add_sysfs(hcon);
4323
4324 amp_physical_cfm(bredr_hcon, hcon);
4325
4326 hci_dev_unlock(hdev);
4327 }
4328
/* HCI Logical Link Complete event (AMP).
 *
 * Creates the hci_chan for the new logical link and, when an AMP
 * manager with a pending BR/EDR L2CAP channel exists, confirms the
 * logical link to L2CAP so the channel can be moved to the AMP.
 *
 * NOTE(review): unlike the neighbouring event handlers this one does
 * not take hci_dev_lock() around the conn-hash lookup — verify whether
 * that is safe here or an omission.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the block-based MTU of the controller */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4366
4367 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4368 struct sk_buff *skb)
4369 {
4370 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4371 struct hci_chan *hchan;
4372
4373 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4374 le16_to_cpu(ev->handle), ev->status);
4375
4376 if (ev->status)
4377 return;
4378
4379 hci_dev_lock(hdev);
4380
4381 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4382 if (!hchan)
4383 goto unlock;
4384
4385 amp_destroy_logical_link(hchan, ev->reason);
4386
4387 unlock:
4388 hci_dev_unlock(hdev);
4389 }
4390
4391 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4392 struct sk_buff *skb)
4393 {
4394 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4395 struct hci_conn *hcon;
4396
4397 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4398
4399 if (ev->status)
4400 return;
4401
4402 hci_dev_lock(hdev);
4403
4404 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4405 if (hcon) {
4406 hcon->state = BT_CLOSED;
4407 hci_conn_del(hcon);
4408 }
4409
4410 hci_dev_unlock(hdev);
4411 }
4412
/* HCI LE Connection Complete event.
 *
 * Finalizes an LE connection: locates or creates the hci_conn,
 * fills in initiator/responder addresses, resolves the identity
 * address via the IRK store, enforces the block list, notifies mgmt
 * and clears any pending auto-connect parameters for the peer.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection is no longer pending: stop the connect
		 * attempt timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	hci_connect_cfm(conn, ev->status);

	/* The connection attempt for these params succeeded: remove the
	 * pending action and release the reference it held.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4548
4549 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4550 struct sk_buff *skb)
4551 {
4552 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4553 struct hci_conn *conn;
4554
4555 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4556
4557 if (ev->status)
4558 return;
4559
4560 hci_dev_lock(hdev);
4561
4562 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4563 if (conn) {
4564 conn->le_conn_interval = le16_to_cpu(ev->interval);
4565 conn->le_conn_latency = le16_to_cpu(ev->latency);
4566 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4567 }
4568
4569 hci_dev_unlock(hdev);
4570 }
4571
/* This function requires the caller holds hdev->lock.
 *
 * Given an advertising report, decide whether we should initiate a
 * connection to the advertiser based on the stored connection
 * parameters and their auto-connect policy. Returns the new hci_conn
 * on a started connection attempt, otherwise NULL.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controllers will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* Only connect to devices for which we have stored connection
	 * parameters in our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave devices are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4651
4652 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4653 u8 bdaddr_type, bdaddr_t *direct_addr,
4654 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4655 {
4656 struct discovery_state *d = &hdev->discovery;
4657 struct smp_irk *irk;
4658 struct hci_conn *conn;
4659 bool match;
4660 u32 flags;
4661
4662 /* If the direct address is present, then this report is from
4663 * a LE Direct Advertising Report event. In that case it is
4664 * important to see if the address is matching the local
4665 * controller address.
4666 */
4667 if (direct_addr) {
4668 /* Only resolvable random addresses are valid for these
4669 * kind of reports and others can be ignored.
4670 */
4671 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4672 return;
4673
4674 /* If the controller is not using resolvable random
4675 * addresses, then this report can be ignored.
4676 */
4677 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4678 return;
4679
4680 /* If the local IRK of the controller does not match
4681 * with the resolvable random address provided, then
4682 * this report can be ignored.
4683 */
4684 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4685 return;
4686 }
4687
4688 /* Check if we need to convert to identity address */
4689 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4690 if (irk) {
4691 bdaddr = &irk->bdaddr;
4692 bdaddr_type = irk->addr_type;
4693 }
4694
4695 /* Check if we have been requested to connect to this device */
4696 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4697 if (conn && type == LE_ADV_IND) {
4698 /* Store report for later inclusion by
4699 * mgmt_device_connected
4700 */
4701 memcpy(conn->le_adv_data, data, len);
4702 conn->le_adv_data_len = len;
4703 }
4704
4705 /* Passive scanning shouldn't trigger any device found events,
4706 * except for devices marked as CONN_REPORT for which we do send
4707 * device found events.
4708 */
4709 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4710 if (type == LE_ADV_DIRECT_IND)
4711 return;
4712
4713 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4714 bdaddr, bdaddr_type))
4715 return;
4716
4717 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4718 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4719 else
4720 flags = 0;
4721 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4722 rssi, flags, data, len, NULL, 0);
4723 return;
4724 }
4725
4726 /* When receiving non-connectable or scannable undirected
4727 * advertising reports, this means that the remote device is
4728 * not connectable and then clearly indicate this in the
4729 * device found event.
4730 *
4731 * When receiving a scan response, then there is no way to
4732 * know if the remote device is connectable or not. However
4733 * since scan responses are merged with a previously seen
4734 * advertising report, the flags field from that report
4735 * will be used.
4736 *
4737 * In the really unlikely case that a controller get confused
4738 * and just sends a scan response event, then it is marked as
4739 * not connectable as well.
4740 */
4741 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4742 type == LE_ADV_SCAN_RSP)
4743 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4744 else
4745 flags = 0;
4746
4747 /* If there's nothing pending either store the data from this
4748 * event or send an immediate device found event if the data
4749 * should not be stored for later.
4750 */
4751 if (!has_pending_adv_report(hdev)) {
4752 /* If the report will trigger a SCAN_REQ store it for
4753 * later merging.
4754 */
4755 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4756 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4757 rssi, flags, data, len);
4758 return;
4759 }
4760
4761 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4762 rssi, flags, data, len, NULL, 0);
4763 return;
4764 }
4765
4766 /* Check if the pending report is for the same device as the new one */
4767 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4768 bdaddr_type == d->last_adv_addr_type);
4769
4770 /* If the pending data doesn't match this report or this isn't a
4771 * scan response (e.g. we got a duplicate ADV_IND) then force
4772 * sending of the pending data.
4773 */
4774 if (type != LE_ADV_SCAN_RSP || !match) {
4775 /* Send out whatever is in the cache, but skip duplicates */
4776 if (!match)
4777 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4778 d->last_adv_addr_type, NULL,
4779 d->last_adv_rssi, d->last_adv_flags,
4780 d->last_adv_data,
4781 d->last_adv_data_len, NULL, 0);
4782
4783 /* If the new report will trigger a SCAN_REQ store it for
4784 * later merging.
4785 */
4786 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4787 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4788 rssi, flags, data, len);
4789 return;
4790 }
4791
4792 /* The advertising reports cannot be merged, so clear
4793 * the pending report and send out a device found event.
4794 */
4795 clear_pending_adv_report(hdev);
4796 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4797 rssi, flags, data, len, NULL, 0);
4798 return;
4799 }
4800
4801 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4802 * the new event is a SCAN_RSP. We can therefore proceed with
4803 * sending a merged device found event.
4804 */
4805 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4806 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4807 d->last_adv_data, d->last_adv_data_len, data, len);
4808 clear_pending_adv_report(hdev);
4809 }
4810
4811 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4812 {
4813 u8 num_reports = skb->data[0];
4814 void *ptr = &skb->data[1];
4815
4816 hci_dev_lock(hdev);
4817
4818 while (num_reports--) {
4819 struct hci_ev_le_advertising_info *ev = ptr;
4820 s8 rssi;
4821
4822 rssi = ev->data[ev->length];
4823 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4824 ev->bdaddr_type, NULL, 0, rssi,
4825 ev->data, ev->length);
4826
4827 ptr += sizeof(*ev) + ev->length + 1;
4828 }
4829
4830 hci_dev_unlock(hdev);
4831 }
4832
/* Handle the LE Long Term Key Request event: look up a stored LTK for
 * the connection and either reply with the key material or send a
 * negative reply when no suitable key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	/* The request must refer to an existing connection */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Look up the LTK for this peer and our role on the link */
	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Record the security level this key will provide once the
	 * encryption change completes.
	 */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: flag the connection and remove
		 * the key (RCU-safe since lookups may be concurrent).
		 */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller to reject encryption */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4895
4896 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4897 u8 reason)
4898 {
4899 struct hci_cp_le_conn_param_req_neg_reply cp;
4900
4901 cp.handle = cpu_to_le16(handle);
4902 cp.reason = reason;
4903
4904 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4905 &cp);
4906 }
4907
4908 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4909 struct sk_buff *skb)
4910 {
4911 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4912 struct hci_cp_le_conn_param_req_reply cp;
4913 struct hci_conn *hcon;
4914 u16 handle, min, max, latency, timeout;
4915
4916 handle = le16_to_cpu(ev->handle);
4917 min = le16_to_cpu(ev->interval_min);
4918 max = le16_to_cpu(ev->interval_max);
4919 latency = le16_to_cpu(ev->latency);
4920 timeout = le16_to_cpu(ev->timeout);
4921
4922 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4923 if (!hcon || hcon->state != BT_CONNECTED)
4924 return send_conn_param_neg_reply(hdev, handle,
4925 HCI_ERROR_UNKNOWN_CONN_ID);
4926
4927 if (hci_check_conn_params(min, max, latency, timeout))
4928 return send_conn_param_neg_reply(hdev, handle,
4929 HCI_ERROR_INVALID_LL_PARAMS);
4930
4931 if (hcon->role == HCI_ROLE_MASTER) {
4932 struct hci_conn_params *params;
4933 u8 store_hint;
4934
4935 hci_dev_lock(hdev);
4936
4937 params = hci_conn_params_lookup(hdev, &hcon->dst,
4938 hcon->dst_type);
4939 if (params) {
4940 params->conn_min_interval = min;
4941 params->conn_max_interval = max;
4942 params->conn_latency = latency;
4943 params->supervision_timeout = timeout;
4944 store_hint = 0x01;
4945 } else{
4946 store_hint = 0x00;
4947 }
4948
4949 hci_dev_unlock(hdev);
4950
4951 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4952 store_hint, min, max, latency, timeout);
4953 }
4954
4955 cp.handle = ev->handle;
4956 cp.interval_min = ev->interval_min;
4957 cp.interval_max = ev->interval_max;
4958 cp.latency = ev->latency;
4959 cp.timeout = ev->timeout;
4960 cp.min_ce_len = 0;
4961 cp.max_ce_len = 0;
4962
4963 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4964 }
4965
4966 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4967 struct sk_buff *skb)
4968 {
4969 u8 num_reports = skb->data[0];
4970 void *ptr = &skb->data[1];
4971
4972 hci_dev_lock(hdev);
4973
4974 while (num_reports--) {
4975 struct hci_ev_le_direct_adv_info *ev = ptr;
4976
4977 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4978 ev->bdaddr_type, &ev->direct_addr,
4979 ev->direct_addr_type, ev->rssi, NULL, 0);
4980
4981 ptr += sizeof(*ev);
4982 }
4983
4984 hci_dev_unlock(hdev);
4985 }
4986
4987 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4988 {
4989 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4990
4991 skb_pull(skb, sizeof(*le_ev));
4992
4993 switch (le_ev->subevent) {
4994 case HCI_EV_LE_CONN_COMPLETE:
4995 hci_le_conn_complete_evt(hdev, skb);
4996 break;
4997
4998 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4999 hci_le_conn_update_complete_evt(hdev, skb);
5000 break;
5001
5002 case HCI_EV_LE_ADVERTISING_REPORT:
5003 hci_le_adv_report_evt(hdev, skb);
5004 break;
5005
5006 case HCI_EV_LE_LTK_REQ:
5007 hci_le_ltk_request_evt(hdev, skb);
5008 break;
5009
5010 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5011 hci_le_remote_conn_param_req_evt(hdev, skb);
5012 break;
5013
5014 case HCI_EV_LE_DIRECT_ADV_REPORT:
5015 hci_le_direct_adv_report_evt(hdev, skb);
5016 break;
5017
5018 default:
5019 break;
5020 }
5021 }
5022
5023 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5024 {
5025 struct hci_ev_channel_selected *ev = (void *) skb->data;
5026 struct hci_conn *hcon;
5027
5028 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5029
5030 skb_pull(skb, sizeof(*ev));
5031
5032 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5033 if (!hcon)
5034 return;
5035
5036 amp_read_loc_assoc_final_data(hdev, hcon);
5037 }
5038
/* Main entry point for incoming HCI events: optionally snapshot the
 * event for a pending request, complete a command waiting on this
 * specific event, then dispatch to the per-event handler. Consumes
 * the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was explicitly waiting for this
	 * event type, complete the pending request with success status
	 * before running the regular event handler.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are only logged, never an error */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* The event skb is owned by this function and always freed here */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}