/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
#include "msft.h"

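/* A 16-byte all-zero link key, handy for spotting blank (and therefore
 * invalid) link keys.
 */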
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

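/* Local helper: convert seconds to jiffies via msecs_to_jiffies() */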
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
				  u8 *new_status)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* It is possible that we receive Inquiry Complete event right
	 * before we receive Inquiry Cancel Command Complete event, in
	 * which case the latter event should have status of Command
	 * Disallowed (0x0c). This should not be treated as an error, since
	 * we actually achieve what Inquiry Cancel wants to achieve,
	 * which is to end the last Inquiry session.
	 */
	if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
		status = 0x00;
	}

	*new_status = status;

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}

static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);

	hci_conn_check_pending(hdev);
}

static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
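	/* In the sent command parameters the policy value follows the
	 * 2-byte connection handle, hence the sent + 2 offset below.
	 */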
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}

static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}

static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}

static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}

static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}

static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}

static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}

static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_set_event_filter *cp;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
	if (!sent)
		return;

	cp = (struct hci_cp_set_event_filter *)sent;

	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
	else
		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
}

static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}

static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}

static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}

static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}

static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);

	hci_dev_unlock(hdev);
}

static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
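	/* As with Write_Link_Policy above, the timeout follows the 2-byte
	 * connection handle in the command parameters, hence sent + 2.
	 */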
	if (conn)
		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}

static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}

static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

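	/* Some controllers report bogus SCO buffer information; when the
	 * quirk is set, fall back to known-good defaults.
	 */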
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}

static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->pairing_opts = rp->pairing_opts;
		hdev->max_enc_key_size = rp->max_key_size;
	}
}

static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}

static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}

static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}

static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}

static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}

static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

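	/* which == 0x00 requested the local clock; any other value refers
	 * to the piconet clock of the given connection handle.
	 */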
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}

static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}

static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
					       struct sk_buff *skb)
{
	struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->err_data_reporting = rp->err_data_reporting;
}

static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	__u8 status = *((__u8 *)skb->data);
	struct hci_cp_write_def_err_data_reporting *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
	if (!cp)
		return;

	hdev->err_data_reporting = cp->err_data_reporting;
}

static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}

static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}

static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}

static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}

static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	if (!bacmp(&hdev->rpa, sent)) {
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
				   secs_to_jiffies(hdev->rpa_timeout));
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_default_phy *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_tx_def_phys = cp->tx_phys;
	hdev->le_rx_def_phys = cp->rx_phys;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
					      struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_set_adv_set_rand_addr *cp;
	struct adv_info *adv;

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
	/* Update only if the command addressed an adv instance (non-zero
	 * handle); handle 0x00 shall be using HCI_OP_LE_SET_RANDOM_ADDR
	 * instead, since that allows both extended and non-extended
	 * advertising.
	 */
	if (!cp || !cp->handle)
		return;

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->handle);
	if (adv) {
		bacpy(&adv->random_addr, &cp->bdaddr);
		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
			adv->rpa_expired = false;
			queue_delayed_work(hdev->workqueue,
					   &adv->rpa_expired_cb,
					   secs_to_jiffies(hdev->rpa_timeout));
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->min_le_tx_power = rp->min_le_tx_power;
	hdev->max_le_tx_power = rp->max_le_tx_power;
}

static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	__u8 status = *((__u8 *) skb->data);
	struct adv_info *adv = NULL, *n;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
	if (!cp)
		return;

	set = (void *)cp->data;

	hci_dev_lock(hdev);

	if (cp->num_of_sets)
		adv = hci_find_adv_instance(hdev, set->handle);

	if (cp->enable) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		if (adv)
			adv->enabled = true;

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		if (cp->num_of_sets) {
			if (adv)
				adv->enabled = false;

			/* If just one instance was disabled check if there are
			 * any other instance enabled before clearing HCI_LE_ADV
			 */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list) {
				if (adv->enabled)
					goto unlock;
			}
		} else {
			/* All instances shall be considered disabled */
			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
						 list)
				adv->enabled = false;
		}

		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_params *cp;
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_le_scan_phy_params *phy_param;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
	if (!cp)
		return;

	phy_param = (void *)cp->data;

	hci_dev_lock(hdev);

	hdev->le_scan_type = phy_param->type;

	hci_dev_unlock(hdev);
}

static bool has_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, BDADDR_ANY);
	d->last_adv_data_len = 0;
}

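/* Cache the latest advertising report seen during active scanning so it
 * can still be sent to userspace when scanning is disabled (see
 * le_set_scan_enable_complete below).
 */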
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	if (len > HCI_MAX_AD_LENGTH)
		return;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}

static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}

static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
	if (!cp)
		return;

	le_set_scan_enable_complete(hdev, cp->enable);
}

static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
	       rp->num_of_sets);

	if (rp->status)
		return;

	hdev->le_num_of_adv_sets = rp->num_of_sets;
}

static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_accept_list_size = rp->size;
}

static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_accept_list);
}

static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
			    sent->bdaddr_type);
}

static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_states, rp->le_states, 8);
}

static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
}

static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
	if (!sent)
		return;

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
}

static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
				     sent->local_irk);
}

static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type);
}

static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}

static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_resolv_list_size = rp->size;
}

static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	else
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
}

static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
}

static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}

static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}

static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!cp->handle) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev, cp->handle);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
}

static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}

static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

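	/* Type 0x00 requested the current transmit power level, 0x01 the
	 * maximum transmit power level.
	 */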
	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}

static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *mode;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	if (mode)
		hdev->ssp_debug_mode = *mode;
}

static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_conn_check_pending(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);
}

static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
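			/* 0x0c (Command Disallowed) on an early attempt
			 * leaves the connection in BT_CONNECT2 for a retry;
			 * any other status tears the connection down.
			 */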
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}

static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested.
	 */
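	/* Bit 0 of auth_type encodes the MITM requirement. */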
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}

static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}

static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending resolution, there is no need to continue resolving
	 * the next name; that will be done upon receiving another Remote
	 * Name Request Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication.
	 */
2160 if (!status)
2161 return;
2162
2163 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2164 if (!cp)
2165 return;
2166
2167 hci_dev_lock(hdev);
2168
2169 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2170
2171 if (hci_dev_test_flag(hdev, HCI_MGMT))
2172 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2173
2174 if (!conn)
2175 goto unlock;
2176
2177 if (!hci_outgoing_auth_needed(hdev, conn))
2178 goto unlock;
2179
2180 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2181 struct hci_cp_auth_requested auth_cp;
2182
2183 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2184
2185 auth_cp.handle = __cpu_to_le16(conn->handle);
2186 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2187 sizeof(auth_cp), &auth_cp);
2188 }
2189
2190 unlock:
2191 hci_dev_unlock(hdev);
2192 }
2193
2194 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2195 {
2196 struct hci_cp_read_remote_features *cp;
2197 struct hci_conn *conn;
2198
2199 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2200
2201 if (!status)
2202 return;
2203
2204 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2205 if (!cp)
2206 return;
2207
2208 hci_dev_lock(hdev);
2209
2210 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2211 if (conn) {
2212 if (conn->state == BT_CONFIG) {
2213 hci_connect_cfm(conn, status);
2214 hci_conn_drop(conn);
2215 }
2216 }
2217
2218 hci_dev_unlock(hdev);
2219 }
2220
2221 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2222 {
2223 struct hci_cp_read_remote_ext_features *cp;
2224 struct hci_conn *conn;
2225
2226 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2227
2228 if (!status)
2229 return;
2230
2231 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2232 if (!cp)
2233 return;
2234
2235 hci_dev_lock(hdev);
2236
2237 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2238 if (conn) {
2239 if (conn->state == BT_CONFIG) {
2240 hci_connect_cfm(conn, status);
2241 hci_conn_drop(conn);
2242 }
2243 }
2244
2245 hci_dev_unlock(hdev);
2246 }
2247
2248 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2249 {
2250 struct hci_cp_setup_sync_conn *cp;
2251 struct hci_conn *acl, *sco;
2252 __u16 handle;
2253
2254 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2255
2256 if (!status)
2257 return;
2258
2259 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2260 if (!cp)
2261 return;
2262
2263 handle = __le16_to_cpu(cp->handle);
2264
2265 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2266
2267 hci_dev_lock(hdev);
2268
2269 acl = hci_conn_hash_lookup_handle(hdev, handle);
2270 if (acl) {
2271 sco = acl->link;
2272 if (sco) {
2273 sco->state = BT_CLOSED;
2274
2275 hci_connect_cfm(sco, status);
2276 hci_conn_del(sco);
2277 }
2278 }
2279
2280 hci_dev_unlock(hdev);
2281 }
2282
2283 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2284 {
2285 struct hci_cp_sniff_mode *cp;
2286 struct hci_conn *conn;
2287
2288 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2289
2290 if (!status)
2291 return;
2292
2293 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2294 if (!cp)
2295 return;
2296
2297 hci_dev_lock(hdev);
2298
2299 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2300 if (conn) {
2301 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2302
2303 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2304 hci_sco_setup(conn, status);
2305 }
2306
2307 hci_dev_unlock(hdev);
2308 }
2309
2310 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2311 {
2312 struct hci_cp_exit_sniff_mode *cp;
2313 struct hci_conn *conn;
2314
2315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2316
2317 if (!status)
2318 return;
2319
2320 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2321 if (!cp)
2322 return;
2323
2324 hci_dev_lock(hdev);
2325
2326 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2327 if (conn) {
2328 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2329
2330 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2331 hci_sco_setup(conn, status);
2332 }
2333
2334 hci_dev_unlock(hdev);
2335 }
2336
2337 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2338 {
2339 struct hci_cp_disconnect *cp;
2340 struct hci_conn *conn;
2341
2342 if (!status)
2343 return;
2344
2345 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2346 if (!cp)
2347 return;
2348
2349 hci_dev_lock(hdev);
2350
2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2352 if (conn) {
2353 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2354 conn->dst_type, status);
2355
2356 if (conn->type == LE_LINK) {
2357 hdev->cur_adv_instance = conn->adv_instance;
2358 hci_req_reenable_advertising(hdev);
2359 }
2360
2361 /* If the disconnection failed for any reason, the upper layer
2362 * does not retry the disconnection in the current implementation.
2363 * Hence, we need to do some basic cleanup here and re-enable
2364 * advertising if necessary.
2365 */
2366 hci_conn_del(conn);
2367 }
2368
2369 hci_dev_unlock(hdev);
2370 }
2371
2372 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2373 u8 peer_addr_type, u8 own_address_type,
2374 u8 filter_policy)
2375 {
2376 struct hci_conn *conn;
2377
2378 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2379 peer_addr_type);
2380 if (!conn)
2381 return;
2382
2383 /* When using controller-based address resolution, the new
2384 * address types 0x02 and 0x03 are used. These types need to be
2385 * converted back into either the public or the random address type
2386 */
2387 if (use_ll_privacy(hdev) &&
2388 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2389 switch (own_address_type) {
2390 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2391 own_address_type = ADDR_LE_DEV_PUBLIC;
2392 break;
2393 case ADDR_LE_DEV_RANDOM_RESOLVED:
2394 own_address_type = ADDR_LE_DEV_RANDOM;
2395 break;
2396 }
2397 }
2398
2399 /* Store the initiator and responder address information which
2400 * is needed for SMP. These values will not change during the
2401 * lifetime of the connection.
2402 */
2403 conn->init_addr_type = own_address_type;
2404 if (own_address_type == ADDR_LE_DEV_RANDOM)
2405 bacpy(&conn->init_addr, &hdev->random_addr);
2406 else
2407 bacpy(&conn->init_addr, &hdev->bdaddr);
2408
2409 conn->resp_addr_type = peer_addr_type;
2410 bacpy(&conn->resp_addr, peer_addr);
2411
2412 /* We don't want the connection attempt to stick around
2413 * indefinitely since LE doesn't have a page timeout concept
2414 * like BR/EDR. Set a timer for any connection that doesn't use
2415 * the accept list for connecting.
2416 */
2417 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2418 queue_delayed_work(conn->hdev->workqueue,
2419 &conn->le_conn_timeout,
2420 conn->conn_timeout);
2421 }
2422
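/* A small helper sketch (hypothetical, for illustration only) of the
 * conversion performed above: with controller-based address resolution
 * the resolved types 0x02/0x03 map back onto the plain public/random
 * types that SMP expects.
 */
static u8 example_unresolved_addr_type(u8 own_address_type)
{
	switch (own_address_type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		return ADDR_LE_DEV_PUBLIC;
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		return ADDR_LE_DEV_RANDOM;
	default:
		return own_address_type;
	}
}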
2423 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2424 {
2425 struct hci_cp_le_create_conn *cp;
2426
2427 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2428
2429 /* All connection failure handling is taken care of by the
2430 * hci_le_conn_failed function, which is triggered by the HCI
2431 * request completion callbacks used for connecting.
2432 */
2433 if (status)
2434 return;
2435
2436 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2437 if (!cp)
2438 return;
2439
2440 hci_dev_lock(hdev);
2441
2442 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2443 cp->own_address_type, cp->filter_policy);
2444
2445 hci_dev_unlock(hdev);
2446 }
2447
2448 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2449 {
2450 struct hci_cp_le_ext_create_conn *cp;
2451
2452 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2453
2454 /* All connection failure handling is taken care of by the
2455 * hci_le_conn_failed function, which is triggered by the HCI
2456 * request completion callbacks used for connecting.
2457 */
2458 if (status)
2459 return;
2460
2461 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2462 if (!cp)
2463 return;
2464
2465 hci_dev_lock(hdev);
2466
2467 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2468 cp->own_addr_type, cp->filter_policy);
2469
2470 hci_dev_unlock(hdev);
2471 }
2472
2473 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2474 {
2475 struct hci_cp_le_read_remote_features *cp;
2476 struct hci_conn *conn;
2477
2478 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2479
2480 if (!status)
2481 return;
2482
2483 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2484 if (!cp)
2485 return;
2486
2487 hci_dev_lock(hdev);
2488
2489 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2490 if (conn) {
2491 if (conn->state == BT_CONFIG) {
2492 hci_connect_cfm(conn, status);
2493 hci_conn_drop(conn);
2494 }
2495 }
2496
2497 hci_dev_unlock(hdev);
2498 }
2499
2500 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2501 {
2502 struct hci_cp_le_start_enc *cp;
2503 struct hci_conn *conn;
2504
2505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2506
2507 if (!status)
2508 return;
2509
2510 hci_dev_lock(hdev);
2511
2512 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2513 if (!cp)
2514 goto unlock;
2515
2516 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2517 if (!conn)
2518 goto unlock;
2519
2520 if (conn->state != BT_CONNECTED)
2521 goto unlock;
2522
2523 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2524 hci_conn_drop(conn);
2525
2526 unlock:
2527 hci_dev_unlock(hdev);
2528 }
2529
2530 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2531 {
2532 struct hci_cp_switch_role *cp;
2533 struct hci_conn *conn;
2534
2535 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2536
2537 if (!status)
2538 return;
2539
2540 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2541 if (!cp)
2542 return;
2543
2544 hci_dev_lock(hdev);
2545
2546 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2547 if (conn)
2548 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2549
2550 hci_dev_unlock(hdev);
2551 }
2552
2553 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2554 {
2555 __u8 status = *((__u8 *) skb->data);
2556 struct discovery_state *discov = &hdev->discovery;
2557 struct inquiry_entry *e;
2558
2559 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2560
2561 hci_conn_check_pending(hdev);
2562
2563 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2564 return;
2565
2566 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2567 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2568
2569 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2570 return;
2571
2572 hci_dev_lock(hdev);
2573
2574 if (discov->state != DISCOVERY_FINDING)
2575 goto unlock;
2576
2577 if (list_empty(&discov->resolve)) {
2578 /* When BR/EDR inquiry is active and no LE scanning is in
2579 * progress, then change discovery state to indicate completion.
2580 *
2581 * When running LE scanning and BR/EDR inquiry simultaneously
2582 * and the LE scan already finished, then change the discovery
2583 * state to indicate completion.
2584 */
2585 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2586 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2587 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2588 goto unlock;
2589 }
2590
2591 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2592 if (e && hci_resolve_name(hdev, e) == 0) {
2593 e->name_state = NAME_PENDING;
2594 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2595 } else {
2596 /* When BR/EDR inquiry is active and no LE scanning is in
2597 * progress, then change discovery state to indicate completion.
2598 *
2599 * When running LE scanning and BR/EDR inquiry simultaneously
2600 * and the LE scan already finished, then change the discovery
2601 * state to indicate completion.
2602 */
2603 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2604 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2605 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2606 }
2607
2608 unlock:
2609 hci_dev_unlock(hdev);
2610 }
2611
2612 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2613 {
2614 struct inquiry_data data;
2615 struct inquiry_info *info = (void *) (skb->data + 1);
2616 int num_rsp = *((__u8 *) skb->data);
2617
2618 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2619
2620 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2621 return;
2622
2623 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2624 return;
2625
2626 hci_dev_lock(hdev);
2627
2628 for (; num_rsp; num_rsp--, info++) {
2629 u32 flags;
2630
2631 bacpy(&data.bdaddr, &info->bdaddr);
2632 data.pscan_rep_mode = info->pscan_rep_mode;
2633 data.pscan_period_mode = info->pscan_period_mode;
2634 data.pscan_mode = info->pscan_mode;
2635 memcpy(data.dev_class, info->dev_class, 3);
2636 data.clock_offset = info->clock_offset;
2637 data.rssi = HCI_RSSI_INVALID;
2638 data.ssp_mode = 0x00;
2639
2640 flags = hci_inquiry_cache_update(hdev, &data, false);
2641
2642 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2643 info->dev_class, HCI_RSSI_INVALID,
2644 flags, NULL, 0, NULL, 0);
2645 }
2646
2647 hci_dev_unlock(hdev);
2648 }
2649
2650 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2651 {
2652 struct hci_ev_conn_complete *ev = (void *) skb->data;
2653 struct hci_conn *conn;
2654
2655 BT_DBG("%s", hdev->name);
2656
2657 hci_dev_lock(hdev);
2658
2659 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2660 if (!conn) {
2661 /* The connection may not exist if it was auto-connected. Check
2662 * the BR/EDR allowlist to see if this device is allowed to auto
2663 * connect; if the link is of ACL type, create the connection
2664 * object automatically.
2665 *
2666 * Auto-connect will only occur if the event filter is
2667 * programmed with a given address. Right now, the event filter is
2668 * only used during suspend.
2669 */
2670 if (ev->link_type == ACL_LINK &&
2671 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2672 &ev->bdaddr,
2673 BDADDR_BREDR)) {
2674 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2675 HCI_ROLE_SLAVE);
2676 if (!conn) {
2677 bt_dev_err(hdev, "no memory for new conn");
2678 goto unlock;
2679 }
2680 } else {
2681 if (ev->link_type != SCO_LINK)
2682 goto unlock;
2683
2684 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2685 &ev->bdaddr);
2686 if (!conn)
2687 goto unlock;
2688
2689 conn->type = SCO_LINK;
2690 }
2691 }
2692
2693 if (!ev->status) {
2694 conn->handle = __le16_to_cpu(ev->handle);
2695
2696 if (conn->type == ACL_LINK) {
2697 conn->state = BT_CONFIG;
2698 hci_conn_hold(conn);
2699
2700 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2701 !hci_find_link_key(hdev, &ev->bdaddr))
2702 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2703 else
2704 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2705 } else
2706 conn->state = BT_CONNECTED;
2707
2708 hci_debugfs_create_conn(conn);
2709 hci_conn_add_sysfs(conn);
2710
2711 if (test_bit(HCI_AUTH, &hdev->flags))
2712 set_bit(HCI_CONN_AUTH, &conn->flags);
2713
2714 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2715 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2716
2717 /* Get remote features */
2718 if (conn->type == ACL_LINK) {
2719 struct hci_cp_read_remote_features cp;
2720 cp.handle = ev->handle;
2721 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2722 sizeof(cp), &cp);
2723
2724 hci_req_update_scan(hdev);
2725 }
2726
2727 /* Set packet type for incoming connection */
2728 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2729 struct hci_cp_change_conn_ptype cp;
2730 cp.handle = ev->handle;
2731 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2732 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2733 &cp);
2734 }
2735 } else {
2736 conn->state = BT_CLOSED;
2737 if (conn->type == ACL_LINK)
2738 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2739 conn->dst_type, ev->status);
2740 }
2741
2742 if (conn->type == ACL_LINK)
2743 hci_sco_setup(conn, ev->status);
2744
2745 if (ev->status) {
2746 hci_connect_cfm(conn, ev->status);
2747 hci_conn_del(conn);
2748 } else if (ev->link_type == SCO_LINK) {
2749 switch (conn->setting & SCO_AIRMODE_MASK) {
2750 case SCO_AIRMODE_CVSD:
2751 if (hdev->notify)
2752 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2753 break;
2754 }
2755
2756 hci_connect_cfm(conn, ev->status);
2757 }
2758
2759 unlock:
2760 hci_dev_unlock(hdev);
2761
2762 hci_conn_check_pending(hdev);
2763 }
2764
2765 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2766 {
2767 struct hci_cp_reject_conn_req cp;
2768
2769 bacpy(&cp.bdaddr, bdaddr);
2770 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2771 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2772 }
2773
2774 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2775 {
2776 struct hci_ev_conn_request *ev = (void *) skb->data;
2777 int mask = hdev->link_mode;
2778 struct inquiry_entry *ie;
2779 struct hci_conn *conn;
2780 __u8 flags = 0;
2781
2782 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2783 ev->link_type);
2784
2785 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2786 &flags);
2787
2788 if (!(mask & HCI_LM_ACCEPT)) {
2789 hci_reject_conn(hdev, &ev->bdaddr);
2790 return;
2791 }
2792
2793 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2794 BDADDR_BREDR)) {
2795 hci_reject_conn(hdev, &ev->bdaddr);
2796 return;
2797 }
2798
2799 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2800 * connection. These features are only touched through mgmt so
2801 * only do the checks if HCI_MGMT is set.
2802 */
2803 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2804 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2805 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2806 BDADDR_BREDR)) {
2807 hci_reject_conn(hdev, &ev->bdaddr);
2808 return;
2809 }
2810
2811 /* Connection accepted */
2812
2813 hci_dev_lock(hdev);
2814
2815 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2816 if (ie)
2817 memcpy(ie->data.dev_class, ev->dev_class, 3);
2818
2819 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2820 &ev->bdaddr);
2821 if (!conn) {
2822 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2823 HCI_ROLE_SLAVE);
2824 if (!conn) {
2825 bt_dev_err(hdev, "no memory for new connection");
2826 hci_dev_unlock(hdev);
2827 return;
2828 }
2829 }
2830
2831 memcpy(conn->dev_class, ev->dev_class, 3);
2832
2833 hci_dev_unlock(hdev);
2834
2835 if (ev->link_type == ACL_LINK ||
2836 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2837 struct hci_cp_accept_conn_req cp;
2838 conn->state = BT_CONNECT;
2839
2840 bacpy(&cp.bdaddr, &ev->bdaddr);
2841
2842 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2843 cp.role = 0x00; /* Become central */
2844 else
2845 cp.role = 0x01; /* Remain peripheral */
2846
2847 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2848 } else if (!(flags & HCI_PROTO_DEFER)) {
2849 struct hci_cp_accept_sync_conn_req cp;
2850 conn->state = BT_CONNECT;
2851
2852 bacpy(&cp.bdaddr, &ev->bdaddr);
2853 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2854
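/* 0x1f40 is 8000 octets/s, i.e. 64 kbit/s in each direction -- the
 * standard CVSD voice bandwidth; max_latency 0xffff and
 * retrans_effort 0xff both mean "don't care".
 */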
2855 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2856 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2857 cp.max_latency = cpu_to_le16(0xffff);
2858 cp.content_format = cpu_to_le16(hdev->voice_setting);
2859 cp.retrans_effort = 0xff;
2860
2861 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2862 &cp);
2863 } else {
2864 conn->state = BT_CONNECT2;
2865 hci_connect_cfm(conn, 0);
2866 }
2867 }
2868
2869 static u8 hci_to_mgmt_reason(u8 err)
2870 {
2871 switch (err) {
2872 case HCI_ERROR_CONNECTION_TIMEOUT:
2873 return MGMT_DEV_DISCONN_TIMEOUT;
2874 case HCI_ERROR_REMOTE_USER_TERM:
2875 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2876 case HCI_ERROR_REMOTE_POWER_OFF:
2877 return MGMT_DEV_DISCONN_REMOTE;
2878 case HCI_ERROR_LOCAL_HOST_TERM:
2879 return MGMT_DEV_DISCONN_LOCAL_HOST;
2880 default:
2881 return MGMT_DEV_DISCONN_UNKNOWN;
2882 }
2883 }
2884
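/* For reference: the reasons handled above are the standard core-spec
 * error codes -- 0x08 (connection timeout), 0x13-0x15 (termination by
 * the remote user, low resources or power-off) and 0x16 (terminated by
 * the local host); anything else is reported as unknown.
 */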
2885 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2886 {
2887 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2888 u8 reason;
2889 struct hci_conn_params *params;
2890 struct hci_conn *conn;
2891 bool mgmt_connected;
2892
2893 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2894
2895 hci_dev_lock(hdev);
2896
2897 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2898 if (!conn)
2899 goto unlock;
2900
2901 if (ev->status) {
2902 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2903 conn->dst_type, ev->status);
2904 goto unlock;
2905 }
2906
2907 conn->state = BT_CLOSED;
2908
2909 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2910
2911 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2912 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2913 else
2914 reason = hci_to_mgmt_reason(ev->reason);
2915
2916 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2917 reason, mgmt_connected);
2918
2919 if (conn->type == ACL_LINK) {
2920 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2921 hci_remove_link_key(hdev, &conn->dst);
2922
2923 hci_req_update_scan(hdev);
2924 }
2925
2926 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2927 if (params) {
2928 switch (params->auto_connect) {
2929 case HCI_AUTO_CONN_LINK_LOSS:
2930 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2931 break;
2932 fallthrough;
2933
2934 case HCI_AUTO_CONN_DIRECT:
2935 case HCI_AUTO_CONN_ALWAYS:
2936 list_del_init(&params->action);
2937 list_add(&params->action, &hdev->pend_le_conns);
2938 hci_update_background_scan(hdev);
2939 break;
2940
2941 default:
2942 break;
2943 }
2944 }
2945
2946 hci_disconn_cfm(conn, ev->reason);
2947
2948 /* The suspend notifier is waiting for all devices to disconnect so
2949 * clear the bit from pending tasks and inform the wait queue.
2950 */
2951 if (list_empty(&hdev->conn_hash.list) &&
2952 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2953 wake_up(&hdev->suspend_wait_q);
2954 }
2955
2956 /* Re-enable advertising if necessary, since it might
2957 * have been disabled by the connection. From the
2958 * HCI_LE_Set_Advertise_Enable command description in
2959 * the core specification (v4.0):
2960 * "The Controller shall continue advertising until the Host
2961 * issues an LE_Set_Advertise_Enable command with
2962 * Advertising_Enable set to 0x00 (Advertising is disabled)
2963 * or until a connection is created or until the Advertising
2964 * is timed out due to Directed Advertising."
2965 */
2966 if (conn->type == LE_LINK) {
2967 hdev->cur_adv_instance = conn->adv_instance;
2968 hci_req_reenable_advertising(hdev);
2969 }
2970
2971 hci_conn_del(conn);
2972
2973 unlock:
2974 hci_dev_unlock(hdev);
2975 }
2976
2977 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2978 {
2979 struct hci_ev_auth_complete *ev = (void *) skb->data;
2980 struct hci_conn *conn;
2981
2982 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2983
2984 hci_dev_lock(hdev);
2985
2986 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2987 if (!conn)
2988 goto unlock;
2989
2990 if (!ev->status) {
2991 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2992
2993 if (!hci_conn_ssp_enabled(conn) &&
2994 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2995 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2996 } else {
2997 set_bit(HCI_CONN_AUTH, &conn->flags);
2998 conn->sec_level = conn->pending_sec_level;
2999 }
3000 } else {
3001 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3002 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3003
3004 mgmt_auth_failed(conn, ev->status);
3005 }
3006
3007 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3008 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3009
3010 if (conn->state == BT_CONFIG) {
3011 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3012 struct hci_cp_set_conn_encrypt cp;
3013 cp.handle = ev->handle;
3014 cp.encrypt = 0x01;
3015 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3016 &cp);
3017 } else {
3018 conn->state = BT_CONNECTED;
3019 hci_connect_cfm(conn, ev->status);
3020 hci_conn_drop(conn);
3021 }
3022 } else {
3023 hci_auth_cfm(conn, ev->status);
3024
3025 hci_conn_hold(conn);
3026 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3027 hci_conn_drop(conn);
3028 }
3029
3030 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3031 if (!ev->status) {
3032 struct hci_cp_set_conn_encrypt cp;
3033 cp.handle = ev->handle;
3034 cp.encrypt = 0x01;
3035 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3036 &cp);
3037 } else {
3038 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3039 hci_encrypt_cfm(conn, ev->status);
3040 }
3041 }
3042
3043 unlock:
3044 hci_dev_unlock(hdev);
3045 }
3046
3047 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3048 {
3049 struct hci_ev_remote_name *ev = (void *) skb->data;
3050 struct hci_conn *conn;
3051
3052 BT_DBG("%s", hdev->name);
3053
3054 hci_conn_check_pending(hdev);
3055
3056 hci_dev_lock(hdev);
3057
3058 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3059
3060 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3061 goto check_auth;
3062
3063 if (ev->status == 0)
3064 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3065 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3066 else
3067 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3068
3069 check_auth:
3070 if (!conn)
3071 goto unlock;
3072
3073 if (!hci_outgoing_auth_needed(hdev, conn))
3074 goto unlock;
3075
3076 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3077 struct hci_cp_auth_requested cp;
3078
3079 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3080
3081 cp.handle = __cpu_to_le16(conn->handle);
3082 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3083 }
3084
3085 unlock:
3086 hci_dev_unlock(hdev);
3087 }
3088
3089 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3090 u16 opcode, struct sk_buff *skb)
3091 {
3092 const struct hci_rp_read_enc_key_size *rp;
3093 struct hci_conn *conn;
3094 u16 handle;
3095
3096 BT_DBG("%s status 0x%02x", hdev->name, status);
3097
3098 if (!skb || skb->len < sizeof(*rp)) {
3099 bt_dev_err(hdev, "invalid read key size response");
3100 return;
3101 }
3102
3103 rp = (void *)skb->data;
3104 handle = le16_to_cpu(rp->handle);
3105
3106 hci_dev_lock(hdev);
3107
3108 conn = hci_conn_hash_lookup_handle(hdev, handle);
3109 if (!conn)
3110 goto unlock;
3111
3112 /* While unexpected, the read_enc_key_size command may fail. The most
3113 * secure approach is to then assume the key size is 0 to force a
3114 * disconnection.
3115 */
3116 if (rp->status) {
3117 bt_dev_err(hdev, "failed to read key size for handle %u",
3118 handle);
3119 conn->enc_key_size = 0;
3120 } else {
3121 conn->enc_key_size = rp->key_size;
3122 }
3123
3124 hci_encrypt_cfm(conn, 0);
3125
3126 unlock:
3127 hci_dev_unlock(hdev);
3128 }
3129
3130 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3131 {
3132 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3133 struct hci_conn *conn;
3134
3135 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3136
3137 hci_dev_lock(hdev);
3138
3139 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3140 if (!conn)
3141 goto unlock;
3142
3143 if (!ev->status) {
3144 if (ev->encrypt) {
3145 /* Encryption implies authentication */
3146 set_bit(HCI_CONN_AUTH, &conn->flags);
3147 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3148 conn->sec_level = conn->pending_sec_level;
3149
3150 /* P-256 authentication key implies FIPS */
3151 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3152 set_bit(HCI_CONN_FIPS, &conn->flags);
3153
3154 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3155 conn->type == LE_LINK)
3156 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3157 } else {
3158 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3159 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3160 }
3161 }
3162
3163 /* We should disregard the current RPA and generate a new one
3164 * whenever the encryption procedure fails.
3165 */
3166 if (ev->status && conn->type == LE_LINK) {
3167 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3168 hci_adv_instances_set_rpa_expired(hdev, true);
3169 }
3170
3171 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3172
3173 /* Check link security requirements are met */
3174 if (!hci_conn_check_link_mode(conn))
3175 ev->status = HCI_ERROR_AUTH_FAILURE;
3176
3177 if (ev->status && conn->state == BT_CONNECTED) {
3178 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3179 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3180
3181 /* Notify upper layers so they can clean up before
3182 * disconnecting.
3183 */
3184 hci_encrypt_cfm(conn, ev->status);
3185 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3186 hci_conn_drop(conn);
3187 goto unlock;
3188 }
3189
3190 /* Try reading the encryption key size for encrypted ACL links */
3191 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3192 struct hci_cp_read_enc_key_size cp;
3193 struct hci_request req;
3194
3195 /* Only send HCI_Read_Encryption_Key_Size if the
3196 * controller really supports it. If it doesn't, assume
3197 * the default size (16).
3198 */
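/* (Octet 20, bit 4 of the supported-commands bitmask corresponds to
 * Read_Encryption_Key_Size, hence the commands[20] & 0x10 test.)
 */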
3199 if (!(hdev->commands[20] & 0x10)) {
3200 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3201 goto notify;
3202 }
3203
3204 hci_req_init(&req, hdev);
3205
3206 cp.handle = cpu_to_le16(conn->handle);
3207 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3208
3209 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3210 bt_dev_err(hdev, "sending read key size failed");
3211 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3212 goto notify;
3213 }
3214
3215 goto unlock;
3216 }
3217
3218 /* Set the default Authenticated Payload Timeout after
3219 * an LE link is established. As per Core Spec v5.0, Vol 2, Part B,
3220 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3221 * sent when the link is active and encryption is enabled. The conn
3222 * type can be either LE or ACL, and the controller must support
3223 * LMP Ping; AES-CCM encryption is required as well.
3224 */
3225 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3226 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3227 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3228 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3229 struct hci_cp_write_auth_payload_to cp;
3230
3231 cp.handle = cpu_to_le16(conn->handle);
3232 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3233 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3234 sizeof(cp), &cp);
3235 }
3236
3237 notify:
3238 hci_encrypt_cfm(conn, ev->status);
3239
3240 unlock:
3241 hci_dev_unlock(hdev);
3242 }
3243
3244 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3245 struct sk_buff *skb)
3246 {
3247 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3248 struct hci_conn *conn;
3249
3250 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3251
3252 hci_dev_lock(hdev);
3253
3254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3255 if (conn) {
3256 if (!ev->status)
3257 set_bit(HCI_CONN_SECURE, &conn->flags);
3258
3259 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3260
3261 hci_key_change_cfm(conn, ev->status);
3262 }
3263
3264 hci_dev_unlock(hdev);
3265 }
3266
3267 static void hci_remote_features_evt(struct hci_dev *hdev,
3268 struct sk_buff *skb)
3269 {
3270 struct hci_ev_remote_features *ev = (void *) skb->data;
3271 struct hci_conn *conn;
3272
3273 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3274
3275 hci_dev_lock(hdev);
3276
3277 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3278 if (!conn)
3279 goto unlock;
3280
3281 if (!ev->status)
3282 memcpy(conn->features[0], ev->features, 8);
3283
3284 if (conn->state != BT_CONFIG)
3285 goto unlock;
3286
3287 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3288 lmp_ext_feat_capable(conn)) {
3289 struct hci_cp_read_remote_ext_features cp;
3290 cp.handle = ev->handle;
3291 cp.page = 0x01;
3292 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3293 sizeof(cp), &cp);
3294 goto unlock;
3295 }
3296
3297 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3298 struct hci_cp_remote_name_req cp;
3299 memset(&cp, 0, sizeof(cp));
3300 bacpy(&cp.bdaddr, &conn->dst);
3301 cp.pscan_rep_mode = 0x02;
3302 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3303 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3304 mgmt_device_connected(hdev, conn, NULL, 0);
3305
3306 if (!hci_outgoing_auth_needed(hdev, conn)) {
3307 conn->state = BT_CONNECTED;
3308 hci_connect_cfm(conn, ev->status);
3309 hci_conn_drop(conn);
3310 }
3311
3312 unlock:
3313 hci_dev_unlock(hdev);
3314 }
3315
3316 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3317 {
3318 cancel_delayed_work(&hdev->cmd_timer);
3319
3320 if (!test_bit(HCI_RESET, &hdev->flags)) {
3321 if (ncmd) {
3322 cancel_delayed_work(&hdev->ncmd_timer);
3323 atomic_set(&hdev->cmd_cnt, 1);
3324 } else {
3325 schedule_delayed_work(&hdev->ncmd_timer,
3326 HCI_NCMD_TIMEOUT);
3327 }
3328 }
3329 }
3330
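/* Illustrative sketch (hypothetical helper): once the command counter
 * has been replenished above, pending commands are flushed by kicking
 * the command worker -- the same check both event handlers below
 * perform on their exit path.
 */
static void example_kick_cmd_queue(struct hci_dev *hdev)
{
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}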
3331 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3332 u16 *opcode, u8 *status,
3333 hci_req_complete_t *req_complete,
3334 hci_req_complete_skb_t *req_complete_skb)
3335 {
3336 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3337
3338 *opcode = __le16_to_cpu(ev->opcode);
3339 *status = skb->data[sizeof(*ev)];
3340
3341 skb_pull(skb, sizeof(*ev));
3342
3343 switch (*opcode) {
3344 case HCI_OP_INQUIRY_CANCEL:
3345 hci_cc_inquiry_cancel(hdev, skb, status);
3346 break;
3347
3348 case HCI_OP_PERIODIC_INQ:
3349 hci_cc_periodic_inq(hdev, skb);
3350 break;
3351
3352 case HCI_OP_EXIT_PERIODIC_INQ:
3353 hci_cc_exit_periodic_inq(hdev, skb);
3354 break;
3355
3356 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3357 hci_cc_remote_name_req_cancel(hdev, skb);
3358 break;
3359
3360 case HCI_OP_ROLE_DISCOVERY:
3361 hci_cc_role_discovery(hdev, skb);
3362 break;
3363
3364 case HCI_OP_READ_LINK_POLICY:
3365 hci_cc_read_link_policy(hdev, skb);
3366 break;
3367
3368 case HCI_OP_WRITE_LINK_POLICY:
3369 hci_cc_write_link_policy(hdev, skb);
3370 break;
3371
3372 case HCI_OP_READ_DEF_LINK_POLICY:
3373 hci_cc_read_def_link_policy(hdev, skb);
3374 break;
3375
3376 case HCI_OP_WRITE_DEF_LINK_POLICY:
3377 hci_cc_write_def_link_policy(hdev, skb);
3378 break;
3379
3380 case HCI_OP_RESET:
3381 hci_cc_reset(hdev, skb);
3382 break;
3383
3384 case HCI_OP_READ_STORED_LINK_KEY:
3385 hci_cc_read_stored_link_key(hdev, skb);
3386 break;
3387
3388 case HCI_OP_DELETE_STORED_LINK_KEY:
3389 hci_cc_delete_stored_link_key(hdev, skb);
3390 break;
3391
3392 case HCI_OP_WRITE_LOCAL_NAME:
3393 hci_cc_write_local_name(hdev, skb);
3394 break;
3395
3396 case HCI_OP_READ_LOCAL_NAME:
3397 hci_cc_read_local_name(hdev, skb);
3398 break;
3399
3400 case HCI_OP_WRITE_AUTH_ENABLE:
3401 hci_cc_write_auth_enable(hdev, skb);
3402 break;
3403
3404 case HCI_OP_WRITE_ENCRYPT_MODE:
3405 hci_cc_write_encrypt_mode(hdev, skb);
3406 break;
3407
3408 case HCI_OP_WRITE_SCAN_ENABLE:
3409 hci_cc_write_scan_enable(hdev, skb);
3410 break;
3411
3412 case HCI_OP_SET_EVENT_FLT:
3413 hci_cc_set_event_filter(hdev, skb);
3414 break;
3415
3416 case HCI_OP_READ_CLASS_OF_DEV:
3417 hci_cc_read_class_of_dev(hdev, skb);
3418 break;
3419
3420 case HCI_OP_WRITE_CLASS_OF_DEV:
3421 hci_cc_write_class_of_dev(hdev, skb);
3422 break;
3423
3424 case HCI_OP_READ_VOICE_SETTING:
3425 hci_cc_read_voice_setting(hdev, skb);
3426 break;
3427
3428 case HCI_OP_WRITE_VOICE_SETTING:
3429 hci_cc_write_voice_setting(hdev, skb);
3430 break;
3431
3432 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3433 hci_cc_read_num_supported_iac(hdev, skb);
3434 break;
3435
3436 case HCI_OP_WRITE_SSP_MODE:
3437 hci_cc_write_ssp_mode(hdev, skb);
3438 break;
3439
3440 case HCI_OP_WRITE_SC_SUPPORT:
3441 hci_cc_write_sc_support(hdev, skb);
3442 break;
3443
3444 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3445 hci_cc_read_auth_payload_timeout(hdev, skb);
3446 break;
3447
3448 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3449 hci_cc_write_auth_payload_timeout(hdev, skb);
3450 break;
3451
3452 case HCI_OP_READ_LOCAL_VERSION:
3453 hci_cc_read_local_version(hdev, skb);
3454 break;
3455
3456 case HCI_OP_READ_LOCAL_COMMANDS:
3457 hci_cc_read_local_commands(hdev, skb);
3458 break;
3459
3460 case HCI_OP_READ_LOCAL_FEATURES:
3461 hci_cc_read_local_features(hdev, skb);
3462 break;
3463
3464 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3465 hci_cc_read_local_ext_features(hdev, skb);
3466 break;
3467
3468 case HCI_OP_READ_BUFFER_SIZE:
3469 hci_cc_read_buffer_size(hdev, skb);
3470 break;
3471
3472 case HCI_OP_READ_BD_ADDR:
3473 hci_cc_read_bd_addr(hdev, skb);
3474 break;
3475
3476 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3477 hci_cc_read_local_pairing_opts(hdev, skb);
3478 break;
3479
3480 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3481 hci_cc_read_page_scan_activity(hdev, skb);
3482 break;
3483
3484 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3485 hci_cc_write_page_scan_activity(hdev, skb);
3486 break;
3487
3488 case HCI_OP_READ_PAGE_SCAN_TYPE:
3489 hci_cc_read_page_scan_type(hdev, skb);
3490 break;
3491
3492 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3493 hci_cc_write_page_scan_type(hdev, skb);
3494 break;
3495
3496 case HCI_OP_READ_DATA_BLOCK_SIZE:
3497 hci_cc_read_data_block_size(hdev, skb);
3498 break;
3499
3500 case HCI_OP_READ_FLOW_CONTROL_MODE:
3501 hci_cc_read_flow_control_mode(hdev, skb);
3502 break;
3503
3504 case HCI_OP_READ_LOCAL_AMP_INFO:
3505 hci_cc_read_local_amp_info(hdev, skb);
3506 break;
3507
3508 case HCI_OP_READ_CLOCK:
3509 hci_cc_read_clock(hdev, skb);
3510 break;
3511
3512 case HCI_OP_READ_INQ_RSP_TX_POWER:
3513 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3514 break;
3515
3516 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3517 hci_cc_read_def_err_data_reporting(hdev, skb);
3518 break;
3519
3520 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3521 hci_cc_write_def_err_data_reporting(hdev, skb);
3522 break;
3523
3524 case HCI_OP_PIN_CODE_REPLY:
3525 hci_cc_pin_code_reply(hdev, skb);
3526 break;
3527
3528 case HCI_OP_PIN_CODE_NEG_REPLY:
3529 hci_cc_pin_code_neg_reply(hdev, skb);
3530 break;
3531
3532 case HCI_OP_READ_LOCAL_OOB_DATA:
3533 hci_cc_read_local_oob_data(hdev, skb);
3534 break;
3535
3536 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3537 hci_cc_read_local_oob_ext_data(hdev, skb);
3538 break;
3539
3540 case HCI_OP_LE_READ_BUFFER_SIZE:
3541 hci_cc_le_read_buffer_size(hdev, skb);
3542 break;
3543
3544 case HCI_OP_LE_READ_LOCAL_FEATURES:
3545 hci_cc_le_read_local_features(hdev, skb);
3546 break;
3547
3548 case HCI_OP_LE_READ_ADV_TX_POWER:
3549 hci_cc_le_read_adv_tx_power(hdev, skb);
3550 break;
3551
3552 case HCI_OP_USER_CONFIRM_REPLY:
3553 hci_cc_user_confirm_reply(hdev, skb);
3554 break;
3555
3556 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3557 hci_cc_user_confirm_neg_reply(hdev, skb);
3558 break;
3559
3560 case HCI_OP_USER_PASSKEY_REPLY:
3561 hci_cc_user_passkey_reply(hdev, skb);
3562 break;
3563
3564 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3565 hci_cc_user_passkey_neg_reply(hdev, skb);
3566 break;
3567
3568 case HCI_OP_LE_SET_RANDOM_ADDR:
3569 hci_cc_le_set_random_addr(hdev, skb);
3570 break;
3571
3572 case HCI_OP_LE_SET_ADV_ENABLE:
3573 hci_cc_le_set_adv_enable(hdev, skb);
3574 break;
3575
3576 case HCI_OP_LE_SET_SCAN_PARAM:
3577 hci_cc_le_set_scan_param(hdev, skb);
3578 break;
3579
3580 case HCI_OP_LE_SET_SCAN_ENABLE:
3581 hci_cc_le_set_scan_enable(hdev, skb);
3582 break;
3583
3584 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3585 hci_cc_le_read_accept_list_size(hdev, skb);
3586 break;
3587
3588 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3589 hci_cc_le_clear_accept_list(hdev, skb);
3590 break;
3591
3592 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3593 hci_cc_le_add_to_accept_list(hdev, skb);
3594 break;
3595
3596 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3597 hci_cc_le_del_from_accept_list(hdev, skb);
3598 break;
3599
3600 case HCI_OP_LE_READ_SUPPORTED_STATES:
3601 hci_cc_le_read_supported_states(hdev, skb);
3602 break;
3603
3604 case HCI_OP_LE_READ_DEF_DATA_LEN:
3605 hci_cc_le_read_def_data_len(hdev, skb);
3606 break;
3607
3608 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3609 hci_cc_le_write_def_data_len(hdev, skb);
3610 break;
3611
3612 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3613 hci_cc_le_add_to_resolv_list(hdev, skb);
3614 break;
3615
3616 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3617 hci_cc_le_del_from_resolv_list(hdev, skb);
3618 break;
3619
3620 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3621 hci_cc_le_clear_resolv_list(hdev, skb);
3622 break;
3623
3624 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3625 hci_cc_le_read_resolv_list_size(hdev, skb);
3626 break;
3627
3628 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3629 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3630 break;
3631
3632 case HCI_OP_LE_READ_MAX_DATA_LEN:
3633 hci_cc_le_read_max_data_len(hdev, skb);
3634 break;
3635
3636 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3637 hci_cc_write_le_host_supported(hdev, skb);
3638 break;
3639
3640 case HCI_OP_LE_SET_ADV_PARAM:
3641 hci_cc_set_adv_param(hdev, skb);
3642 break;
3643
3644 case HCI_OP_READ_RSSI:
3645 hci_cc_read_rssi(hdev, skb);
3646 break;
3647
3648 case HCI_OP_READ_TX_POWER:
3649 hci_cc_read_tx_power(hdev, skb);
3650 break;
3651
3652 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3653 hci_cc_write_ssp_debug_mode(hdev, skb);
3654 break;
3655
3656 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3657 hci_cc_le_set_ext_scan_param(hdev, skb);
3658 break;
3659
3660 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3661 hci_cc_le_set_ext_scan_enable(hdev, skb);
3662 break;
3663
3664 case HCI_OP_LE_SET_DEFAULT_PHY:
3665 hci_cc_le_set_default_phy(hdev, skb);
3666 break;
3667
3668 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3669 hci_cc_le_read_num_adv_sets(hdev, skb);
3670 break;
3671
3672 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3673 hci_cc_set_ext_adv_param(hdev, skb);
3674 break;
3675
3676 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3677 hci_cc_le_set_ext_adv_enable(hdev, skb);
3678 break;
3679
3680 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3681 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3682 break;
3683
3684 case HCI_OP_LE_READ_TRANSMIT_POWER:
3685 hci_cc_le_read_transmit_power(hdev, skb);
3686 break;
3687
3688 default:
3689 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3690 break;
3691 }
3692
3693 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3694
3695 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3696 req_complete_skb);
3697
3698 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3699 bt_dev_err(hdev,
3700 "unexpected event for opcode 0x%4.4x", *opcode);
3701 return;
3702 }
3703
3704 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3705 queue_work(hdev->workqueue, &hdev->cmd_work);
3706 }
3707
3708 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3709 u16 *opcode, u8 *status,
3710 hci_req_complete_t *req_complete,
3711 hci_req_complete_skb_t *req_complete_skb)
3712 {
3713 struct hci_ev_cmd_status *ev = (void *) skb->data;
3714
3715 skb_pull(skb, sizeof(*ev));
3716
3717 *opcode = __le16_to_cpu(ev->opcode);
3718 *status = ev->status;
3719
3720 switch (*opcode) {
3721 case HCI_OP_INQUIRY:
3722 hci_cs_inquiry(hdev, ev->status);
3723 break;
3724
3725 case HCI_OP_CREATE_CONN:
3726 hci_cs_create_conn(hdev, ev->status);
3727 break;
3728
3729 case HCI_OP_DISCONNECT:
3730 hci_cs_disconnect(hdev, ev->status);
3731 break;
3732
3733 case HCI_OP_ADD_SCO:
3734 hci_cs_add_sco(hdev, ev->status);
3735 break;
3736
3737 case HCI_OP_AUTH_REQUESTED:
3738 hci_cs_auth_requested(hdev, ev->status);
3739 break;
3740
3741 case HCI_OP_SET_CONN_ENCRYPT:
3742 hci_cs_set_conn_encrypt(hdev, ev->status);
3743 break;
3744
3745 case HCI_OP_REMOTE_NAME_REQ:
3746 hci_cs_remote_name_req(hdev, ev->status);
3747 break;
3748
3749 case HCI_OP_READ_REMOTE_FEATURES:
3750 hci_cs_read_remote_features(hdev, ev->status);
3751 break;
3752
3753 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3754 hci_cs_read_remote_ext_features(hdev, ev->status);
3755 break;
3756
3757 case HCI_OP_SETUP_SYNC_CONN:
3758 hci_cs_setup_sync_conn(hdev, ev->status);
3759 break;
3760
3761 case HCI_OP_SNIFF_MODE:
3762 hci_cs_sniff_mode(hdev, ev->status);
3763 break;
3764
3765 case HCI_OP_EXIT_SNIFF_MODE:
3766 hci_cs_exit_sniff_mode(hdev, ev->status);
3767 break;
3768
3769 case HCI_OP_SWITCH_ROLE:
3770 hci_cs_switch_role(hdev, ev->status);
3771 break;
3772
3773 case HCI_OP_LE_CREATE_CONN:
3774 hci_cs_le_create_conn(hdev, ev->status);
3775 break;
3776
3777 case HCI_OP_LE_READ_REMOTE_FEATURES:
3778 hci_cs_le_read_remote_features(hdev, ev->status);
3779 break;
3780
3781 case HCI_OP_LE_START_ENC:
3782 hci_cs_le_start_enc(hdev, ev->status);
3783 break;
3784
3785 case HCI_OP_LE_EXT_CREATE_CONN:
3786 hci_cs_le_ext_create_conn(hdev, ev->status);
3787 break;
3788
3789 default:
3790 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3791 break;
3792 }
3793
3794 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3795
3796 /* Indicate request completion if the command failed. Also, if
3797 * we're not waiting for a special event and we get a success
3798 * command status, we should try to flag the request as completed
3799 * (since for such commands there will not be a command
3800 * complete event).
3801 */
3802 if (ev->status ||
3803 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3804 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3805 req_complete_skb);
3806
3807 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3808 bt_dev_err(hdev,
3809 "unexpected event for opcode 0x%4.4x", *opcode);
3810 return;
3811 }
3812
3813 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3814 queue_work(hdev->workqueue, &hdev->cmd_work);
3815 }
3816
3817 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3818 {
3819 struct hci_ev_hardware_error *ev = (void *) skb->data;
3820
3821 hdev->hw_error_code = ev->code;
3822
3823 queue_work(hdev->req_workqueue, &hdev->error_reset);
3824 }
3825
3826 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3827 {
3828 struct hci_ev_role_change *ev = (void *) skb->data;
3829 struct hci_conn *conn;
3830
3831 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3832
3833 hci_dev_lock(hdev);
3834
3835 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3836 if (conn) {
3837 if (!ev->status)
3838 conn->role = ev->role;
3839
3840 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3841
3842 hci_role_switch_cfm(conn, ev->status, ev->role);
3843 }
3844
3845 hci_dev_unlock(hdev);
3846 }
3847
3848 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3849 {
3850 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3851 int i;
3852
3853 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3854 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3855 return;
3856 }
3857
3858 if (skb->len < sizeof(*ev) ||
3859 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3860 BT_DBG("%s bad parameters", hdev->name);
3861 return;
3862 }
3863
3864 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3865
3866 for (i = 0; i < ev->num_hndl; i++) {
3867 struct hci_comp_pkts_info *info = &ev->handles[i];
3868 struct hci_conn *conn;
3869 __u16 handle, count;
3870
3871 handle = __le16_to_cpu(info->handle);
3872 count = __le16_to_cpu(info->count);
3873
3874 conn = hci_conn_hash_lookup_handle(hdev, handle);
3875 if (!conn)
3876 continue;
3877
3878 conn->sent -= count;
3879
3880 switch (conn->type) {
3881 case ACL_LINK:
3882 hdev->acl_cnt += count;
3883 if (hdev->acl_cnt > hdev->acl_pkts)
3884 hdev->acl_cnt = hdev->acl_pkts;
3885 break;
3886
3887 case LE_LINK:
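/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
 * share the ACL quota instead.
 */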
3888 if (hdev->le_pkts) {
3889 hdev->le_cnt += count;
3890 if (hdev->le_cnt > hdev->le_pkts)
3891 hdev->le_cnt = hdev->le_pkts;
3892 } else {
3893 hdev->acl_cnt += count;
3894 if (hdev->acl_cnt > hdev->acl_pkts)
3895 hdev->acl_cnt = hdev->acl_pkts;
3896 }
3897 break;
3898
3899 case SCO_LINK:
3900 hdev->sco_cnt += count;
3901 if (hdev->sco_cnt > hdev->sco_pkts)
3902 hdev->sco_cnt = hdev->sco_pkts;
3903 break;
3904
3905 default:
3906 bt_dev_err(hdev, "unknown type %d conn %p",
3907 conn->type, conn);
3908 break;
3909 }
3910 }
3911
3912 queue_work(hdev->workqueue, &hdev->tx_work);
3913 }
3914
3915 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3916 __u16 handle)
3917 {
3918 struct hci_chan *chan;
3919
3920 switch (hdev->dev_type) {
3921 case HCI_PRIMARY:
3922 return hci_conn_hash_lookup_handle(hdev, handle);
3923 case HCI_AMP:
3924 chan = hci_chan_lookup_handle(hdev, handle);
3925 if (chan)
3926 return chan->conn;
3927 break;
3928 default:
3929 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3930 break;
3931 }
3932
3933 return NULL;
3934 }
3935
3936 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3937 {
3938 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3939 int i;
3940
3941 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3942 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3943 return;
3944 }
3945
3946 if (skb->len < sizeof(*ev) ||
3947 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3948 BT_DBG("%s bad parameters", hdev->name);
3949 return;
3950 }
3951
3952 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3953 ev->num_hndl);
3954
3955 for (i = 0; i < ev->num_hndl; i++) {
3956 struct hci_comp_blocks_info *info = &ev->handles[i];
3957 struct hci_conn *conn = NULL;
3958 __u16 handle, block_count;
3959
3960 handle = __le16_to_cpu(info->handle);
3961 block_count = __le16_to_cpu(info->blocks);
3962
3963 conn = __hci_conn_lookup_handle(hdev, handle);
3964 if (!conn)
3965 continue;
3966
3967 conn->sent -= block_count;
3968
3969 switch (conn->type) {
3970 case ACL_LINK:
3971 case AMP_LINK:
3972 hdev->block_cnt += block_count;
3973 if (hdev->block_cnt > hdev->num_blocks)
3974 hdev->block_cnt = hdev->num_blocks;
3975 break;
3976
3977 default:
3978 bt_dev_err(hdev, "unknown type %d conn %p",
3979 conn->type, conn);
3980 break;
3981 }
3982 }
3983
3984 queue_work(hdev->workqueue, &hdev->tx_work);
3985 }
3986
3987 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3988 {
3989 struct hci_ev_mode_change *ev = (void *) skb->data;
3990 struct hci_conn *conn;
3991
3992 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3993
3994 hci_dev_lock(hdev);
3995
3996 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3997 if (conn) {
3998 conn->mode = ev->mode;
3999
4000 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4001 &conn->flags)) {
4002 if (conn->mode == HCI_CM_ACTIVE)
4003 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4004 else
4005 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4006 }
4007
4008 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4009 hci_sco_setup(conn, ev->status);
4010 }
4011
4012 hci_dev_unlock(hdev);
4013 }
4014
4015 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4016 {
4017 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4018 struct hci_conn *conn;
4019
4020 BT_DBG("%s", hdev->name);
4021
4022 hci_dev_lock(hdev);
4023
4024 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4025 if (!conn)
4026 goto unlock;
4027
4028 if (conn->state == BT_CONNECTED) {
4029 hci_conn_hold(conn);
4030 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4031 hci_conn_drop(conn);
4032 }
4033
4034 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4035 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4036 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4037 sizeof(ev->bdaddr), &ev->bdaddr);
4038 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4039 u8 secure;
4040
4041 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4042 secure = 1;
4043 else
4044 secure = 0;
4045
4046 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4047 }
4048
4049 unlock:
4050 hci_dev_unlock(hdev);
4051 }
4052
4053 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4054 {
4055 if (key_type == HCI_LK_CHANGED_COMBINATION)
4056 return;
4057
4058 conn->pin_length = pin_len;
4059 conn->key_type = key_type;
4060
4061 switch (key_type) {
4062 case HCI_LK_LOCAL_UNIT:
4063 case HCI_LK_REMOTE_UNIT:
4064 case HCI_LK_DEBUG_COMBINATION:
4065 return;
4066 case HCI_LK_COMBINATION:
4067 if (pin_len == 16)
4068 conn->pending_sec_level = BT_SECURITY_HIGH;
4069 else
4070 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4071 break;
4072 case HCI_LK_UNAUTH_COMBINATION_P192:
4073 case HCI_LK_UNAUTH_COMBINATION_P256:
4074 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4075 break;
4076 case HCI_LK_AUTH_COMBINATION_P192:
4077 conn->pending_sec_level = BT_SECURITY_HIGH;
4078 break;
4079 case HCI_LK_AUTH_COMBINATION_P256:
4080 conn->pending_sec_level = BT_SECURITY_FIPS;
4081 break;
4082 }
4083 }
4084
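/* Summary of the mapping above (levels taken from the switch):
 *   16-digit combination key           -> BT_SECURITY_HIGH
 *   shorter combination key            -> BT_SECURITY_MEDIUM
 *   unauthenticated P-192/P-256 key    -> BT_SECURITY_MEDIUM
 *   authenticated P-192 key            -> BT_SECURITY_HIGH
 *   authenticated P-256 key            -> BT_SECURITY_FIPS
 * Unit, debug and changed-combination keys leave the pending security
 * level untouched.
 */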
4085 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4086 {
4087 struct hci_ev_link_key_req *ev = (void *) skb->data;
4088 struct hci_cp_link_key_reply cp;
4089 struct hci_conn *conn;
4090 struct link_key *key;
4091
4092 BT_DBG("%s", hdev->name);
4093
4094 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4095 return;
4096
4097 hci_dev_lock(hdev);
4098
4099 key = hci_find_link_key(hdev, &ev->bdaddr);
4100 if (!key) {
4101 BT_DBG("%s link key not found for %pMR", hdev->name,
4102 &ev->bdaddr);
4103 goto not_found;
4104 }
4105
4106 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4107 &ev->bdaddr);
4108
4109 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4110 if (conn) {
4111 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4112
4113 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4114 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4115 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4116 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4117 goto not_found;
4118 }
4119
4120 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4121 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4122 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4123 BT_DBG("%s ignoring key unauthenticated for high security",
4124 hdev->name);
4125 goto not_found;
4126 }
4127
4128 conn_set_key(conn, key->type, key->pin_len);
4129 }
4130
4131 bacpy(&cp.bdaddr, &ev->bdaddr);
4132 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4133
4134 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4135
4136 hci_dev_unlock(hdev);
4137
4138 return;
4139
4140 not_found:
4141 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4142 hci_dev_unlock(hdev);
4143 }
4144
4145 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4146 {
4147 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4148 struct hci_conn *conn;
4149 struct link_key *key;
4150 bool persistent;
4151 u8 pin_len = 0;
4152
4153 BT_DBG("%s", hdev->name);
4154
4155 hci_dev_lock(hdev);
4156
4157 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4158 if (!conn)
4159 goto unlock;
4160
4161 hci_conn_hold(conn);
4162 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4163 hci_conn_drop(conn);
4164
4165 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4166 conn_set_key(conn, ev->key_type, conn->pin_length);
4167
4168 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4169 goto unlock;
4170
4171 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4172 ev->key_type, pin_len, &persistent);
4173 if (!key)
4174 goto unlock;
4175
4176 /* Update connection information since adding the key will have
4177 * fixed up the type in the case of changed combination keys.
4178 */
4179 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4180 conn_set_key(conn, key->type, key->pin_len);
4181
4182 mgmt_new_link_key(hdev, key, persistent);
4183
4184 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4185 * is set. If it's not set, simply remove the key from the kernel
4186 * list (we've still notified user space about it but with
4187 * store_hint being 0).
4188 */
4189 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4190 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4191 list_del_rcu(&key->list);
4192 kfree_rcu(key, rcu);
4193 goto unlock;
4194 }
4195
4196 if (persistent)
4197 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4198 else
4199 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4200
4201 unlock:
4202 hci_dev_unlock(hdev);
4203 }
4204
4205 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4206 {
4207 struct hci_ev_clock_offset *ev = (void *) skb->data;
4208 struct hci_conn *conn;
4209
4210 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4211
4212 hci_dev_lock(hdev);
4213
4214 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4215 if (conn && !ev->status) {
4216 struct inquiry_entry *ie;
4217
4218 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4219 if (ie) {
4220 ie->data.clock_offset = ev->clock_offset;
4221 ie->timestamp = jiffies;
4222 }
4223 }
4224
4225 hci_dev_unlock(hdev);
4226 }
4227
4228 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4229 {
4230 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4231 struct hci_conn *conn;
4232
4233 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4234
4235 hci_dev_lock(hdev);
4236
4237 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4238 if (conn && !ev->status)
4239 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4240
4241 hci_dev_unlock(hdev);
4242 }
4243
4244 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4245 {
4246 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4247 struct inquiry_entry *ie;
4248
4249 BT_DBG("%s", hdev->name);
4250
4251 hci_dev_lock(hdev);
4252
4253 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4254 if (ie) {
4255 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4256 ie->timestamp = jiffies;
4257 }
4258
4259 hci_dev_unlock(hdev);
4260 }
4261
4262 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4263 struct sk_buff *skb)
4264 {
4265 struct inquiry_data data;
4266 int num_rsp = *((__u8 *) skb->data);
4267
4268 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4269
4270 if (!num_rsp)
4271 return;
4272
4273 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4274 return;
4275
4276 hci_dev_lock(hdev);
4277
4278 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4279 struct inquiry_info_with_rssi_and_pscan_mode *info;
4280 info = (void *) (skb->data + 1);
4281
4282 if (skb->len < num_rsp * sizeof(*info) + 1)
4283 goto unlock;
4284
4285 for (; num_rsp; num_rsp--, info++) {
4286 u32 flags;
4287
4288 bacpy(&data.bdaddr, &info->bdaddr);
4289 data.pscan_rep_mode = info->pscan_rep_mode;
4290 data.pscan_period_mode = info->pscan_period_mode;
4291 data.pscan_mode = info->pscan_mode;
4292 memcpy(data.dev_class, info->dev_class, 3);
4293 data.clock_offset = info->clock_offset;
4294 data.rssi = info->rssi;
4295 data.ssp_mode = 0x00;
4296
4297 flags = hci_inquiry_cache_update(hdev, &data, false);
4298
4299 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4300 info->dev_class, info->rssi,
4301 flags, NULL, 0, NULL, 0);
4302 }
4303 } else {
4304 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4305
4306 if (skb->len < num_rsp * sizeof(*info) + 1)
4307 goto unlock;
4308
4309 for (; num_rsp; num_rsp--, info++) {
4310 u32 flags;
4311
4312 bacpy(&data.bdaddr, &info->bdaddr);
4313 data.pscan_rep_mode = info->pscan_rep_mode;
4314 data.pscan_period_mode = info->pscan_period_mode;
4315 data.pscan_mode = 0x00;
4316 memcpy(data.dev_class, info->dev_class, 3);
4317 data.clock_offset = info->clock_offset;
4318 data.rssi = info->rssi;
4319 data.ssp_mode = 0x00;
4320
4321 flags = hci_inquiry_cache_update(hdev, &data, false);
4322
4323 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4324 info->dev_class, info->rssi,
4325 flags, NULL, 0, NULL, 0);
4326 }
4327 }
4328
4329 unlock:
4330 hci_dev_unlock(hdev);
4331 }
4332
4333 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4334 struct sk_buff *skb)
4335 {
4336 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4337 struct hci_conn *conn;
4338
4339 BT_DBG("%s", hdev->name);
4340
4341 hci_dev_lock(hdev);
4342
4343 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4344 if (!conn)
4345 goto unlock;
4346
4347 if (ev->page < HCI_MAX_PAGES)
4348 memcpy(conn->features[ev->page], ev->features, 8);
4349
4350 if (!ev->status && ev->page == 0x01) {
4351 struct inquiry_entry *ie;
4352
4353 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4354 if (ie)
4355 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4356
4357 if (ev->features[0] & LMP_HOST_SSP) {
4358 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4359 } else {
4360 /* The Bluetooth specification mandates that
4361 * Extended Inquiry Results are only used when Secure
4362 * Simple Pairing is enabled, but some devices violate
4363 * this.
4364 *
4365 * To make these devices work, the internal SSP
4366 * enabled flag needs to be cleared if the remote host
4367 * features do not indicate SSP support. */
4368 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4369 }
4370
4371 if (ev->features[0] & LMP_HOST_SC)
4372 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4373 }
4374
4375 if (conn->state != BT_CONFIG)
4376 goto unlock;
4377
4378 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4379 struct hci_cp_remote_name_req cp;
4380 memset(&cp, 0, sizeof(cp));
4381 bacpy(&cp.bdaddr, &conn->dst);
4382 cp.pscan_rep_mode = 0x02;
4383 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4384 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4385 mgmt_device_connected(hdev, conn, NULL, 0);
4386
4387 if (!hci_outgoing_auth_needed(hdev, conn)) {
4388 conn->state = BT_CONNECTED;
4389 hci_connect_cfm(conn, ev->status);
4390 hci_conn_drop(conn);
4391 }
4392
4393 unlock:
4394 hci_dev_unlock(hdev);
4395 }
4396
4397 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4398 struct sk_buff *skb)
4399 {
4400 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4401 struct hci_conn *conn;
4402
4403 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4404
4405 hci_dev_lock(hdev);
4406
4407 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4408 if (!conn) {
4409 if (ev->link_type == ESCO_LINK)
4410 goto unlock;
4411
4412 /* When the link type in the event indicates a SCO connection
4413 * and lookup of the connection object fails, then check
4414 * if an eSCO connection object exists.
4415 *
4416 * The core limits synchronous connections to either
4417 * SCO or eSCO. The eSCO connection is preferred and is
4418 * attempted first; until it is successfully established,
4419 * the link type will be hinted as eSCO.
4420 */
4421 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4422 if (!conn)
4423 goto unlock;
4424 }
4425
4426 switch (ev->status) {
4427 case 0x00:
4428 /* The synchronous connection complete event should only be
4429 * sent once per new connection. Receiving a successful
4430 * complete event when the connection status is already
4431 * BT_CONNECTED means that the device is misbehaving and sent
4432 * multiple complete event packets for the same new connection.
4433 *
4434 * Registering the device more than once can corrupt kernel
4435 * memory, hence upon detecting this invalid event, we report
4436 * an error and ignore the packet.
4437 */
4438 if (conn->state == BT_CONNECTED) {
4439 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4440 goto unlock;
4441 }
4442
4443 conn->handle = __le16_to_cpu(ev->handle);
4444 conn->state = BT_CONNECTED;
4445 conn->type = ev->link_type;
4446
4447 hci_debugfs_create_conn(conn);
4448 hci_conn_add_sysfs(conn);
4449 break;
4450
4451 case 0x10: /* Connection Accept Timeout */
4452 case 0x0d: /* Connection Rejected due to Limited Resources */
4453 case 0x11: /* Unsupported Feature or Parameter Value */
4454 case 0x1c: /* SCO interval rejected */
4455 case 0x1a: /* Unsupported Remote Feature */
4456 case 0x1e: /* Invalid LMP Parameters */
4457 case 0x1f: /* Unspecified error */
4458 case 0x20: /* Unsupported LMP Parameter value */
4459 if (conn->out) {
4460 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4461 (hdev->esco_type & EDR_ESCO_MASK);
4462 if (hci_setup_sync(conn, conn->link->handle))
4463 goto unlock;
4464 }
4465 fallthrough;
4466
4467 default:
4468 conn->state = BT_CLOSED;
4469 break;
4470 }
4471
4472 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
4473
4474 switch (ev->air_mode) {
4475 case 0x02:
4476 if (hdev->notify)
4477 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4478 break;
4479 case 0x03:
4480 if (hdev->notify)
4481 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4482 break;
4483 }
4484
4485 hci_connect_cfm(conn, ev->status);
4486 if (ev->status)
4487 hci_conn_del(conn);
4488
4489 unlock:
4490 hci_dev_unlock(hdev);
4491 }
4492
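/* EIR data is a sequence of length-prefixed fields: each field starts
 * with a length byte that counts the type byte plus the payload, so a
 * field occupies (length + 1) bytes in total, and a length byte of
 * zero terminates the significant part of the data.
 */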
4493 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4494 {
4495 size_t parsed = 0;
4496
4497 while (parsed < eir_len) {
4498 u8 field_len = eir[0];
4499
4500 if (field_len == 0)
4501 return parsed;
4502
4503 parsed += field_len + 1;
4504 eir += field_len + 1;
4505 }
4506
4507 return eir_len;
4508 }
4509
4510 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4511 struct sk_buff *skb)
4512 {
4513 struct inquiry_data data;
4514 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4515 int num_rsp = *((__u8 *) skb->data);
4516 size_t eir_len;
4517
4518 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4519
4520 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4521 return;
4522
4523 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4524 return;
4525
4526 hci_dev_lock(hdev);
4527
4528 for (; num_rsp; num_rsp--, info++) {
4529 u32 flags;
4530 bool name_known;
4531
4532 bacpy(&data.bdaddr, &info->bdaddr);
4533 data.pscan_rep_mode = info->pscan_rep_mode;
4534 data.pscan_period_mode = info->pscan_period_mode;
4535 data.pscan_mode = 0x00;
4536 memcpy(data.dev_class, info->dev_class, 3);
4537 data.clock_offset = info->clock_offset;
4538 data.rssi = info->rssi;
4539 data.ssp_mode = 0x01;
4540
4541 if (hci_dev_test_flag(hdev, HCI_MGMT))
4542 name_known = eir_get_data(info->data,
4543 sizeof(info->data),
4544 EIR_NAME_COMPLETE, NULL);
4545 else
4546 name_known = true;
4547
4548 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4549
4550 eir_len = eir_get_length(info->data, sizeof(info->data));
4551
4552 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4553 info->dev_class, info->rssi,
4554 flags, info->data, eir_len, NULL, 0);
4555 }
4556
4557 hci_dev_unlock(hdev);
4558 }
4559
4560 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4561 struct sk_buff *skb)
4562 {
4563 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4564 struct hci_conn *conn;
4565
4566 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4567 __le16_to_cpu(ev->handle));
4568
4569 hci_dev_lock(hdev);
4570
4571 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4572 if (!conn)
4573 goto unlock;
4574
4575 /* For BR/EDR the necessary steps are taken through the
4576 * auth_complete event.
4577 */
4578 if (conn->type != LE_LINK)
4579 goto unlock;
4580
4581 if (!ev->status)
4582 conn->sec_level = conn->pending_sec_level;
4583
4584 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4585
4586 if (ev->status && conn->state == BT_CONNECTED) {
4587 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4588 hci_conn_drop(conn);
4589 goto unlock;
4590 }
4591
4592 if (conn->state == BT_CONFIG) {
4593 if (!ev->status)
4594 conn->state = BT_CONNECTED;
4595
4596 hci_connect_cfm(conn, ev->status);
4597 hci_conn_drop(conn);
4598 } else {
4599 hci_auth_cfm(conn, ev->status);
4600
4601 hci_conn_hold(conn);
4602 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4603 hci_conn_drop(conn);
4604 }
4605
4606 unlock:
4607 hci_dev_unlock(hdev);
4608 }
4609
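/* Compute the authentication requirements to use in the IO Capability
 * Reply. In the HCI authentication requirements encoding bit 0 is the
 * MITM protection bit, while the remaining bits select the bonding
 * type, which is why the masking below uses 0x01 and ~0x01.
 */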
4610 static u8 hci_get_auth_req(struct hci_conn *conn)
4611 {
4612 /* If remote requests no-bonding follow that lead */
4613 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4614 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4615 return conn->remote_auth | (conn->auth_type & 0x01);
4616
4617 /* If both remote and local have enough IO capabilities, require
4618 * MITM protection
4619 */
4620 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4621 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4622 return conn->remote_auth | 0x01;
4623
4624 /* No MITM protection possible so ignore remote requirement */
4625 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4626 }
4627
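/* Determine the OOB "data present" value for the IO Capability Reply:
 * 0x00 when no OOB data is stored for the peer, 0x01 when P-192 values
 * are available and 0x02 when P-256 values are available (per the HCI
 * definition of this parameter).
 */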
4628 static u8 bredr_oob_data_present(struct hci_conn *conn)
4629 {
4630 struct hci_dev *hdev = conn->hdev;
4631 struct oob_data *data;
4632
4633 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4634 if (!data)
4635 return 0x00;
4636
4637 if (bredr_sc_enabled(hdev)) {
4638 /* When Secure Connections is enabled, just
4639 * return the present value stored with the OOB
4640 * data. The stored value contains the right present
4641 * information. However, it can only be trusted when
4642 * not in Secure Connections Only mode.
4643 */
4644 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4645 return data->present;
4646
4647 /* When Secure Connections Only mode is enabled, then
4648 * the P-256 values are required. If they are not
4649 * available, then do not declare that OOB data is
4650 * present.
4651 */
4652 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4653 !memcmp(data->hash256, ZERO_KEY, 16))
4654 return 0x00;
4655
4656 return 0x02;
4657 }
4658
4659 /* When Secure Connections is not enabled or actually
4660 * not supported by the hardware, then check if the
4661 * P-192 data values are present.
4662 */
4663 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4664 !memcmp(data->hash192, ZERO_KEY, 16))
4665 return 0x00;
4666
4667 return 0x01;
4668 }
4669
4670 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4671 {
4672 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4673 struct hci_conn *conn;
4674
4675 BT_DBG("%s", hdev->name);
4676
4677 hci_dev_lock(hdev);
4678
4679 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4680 if (!conn)
4681 goto unlock;
4682
4683 hci_conn_hold(conn);
4684
4685 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4686 goto unlock;
4687
4688 /* Allow pairing if we're pairable, if we are the initiators
4689 * of the pairing, or if the remote is not requesting bonding.
4690 */
4691 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4692 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4693 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4694 struct hci_cp_io_capability_reply cp;
4695
4696 bacpy(&cp.bdaddr, &ev->bdaddr);
4697 /* Change the IO capability from KeyboardDisplay to DisplayYesNo
4698 * as KeyboardDisplay is not supported by the BT spec. */
4699 cp.capability = (conn->io_capability == 0x04) ?
4700 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4701
4702 /* If we are initiators, there is no remote information yet */
4703 if (conn->remote_auth == 0xff) {
4704 /* Request MITM protection if our IO caps allow it
4705 * except for the no-bonding case.
4706 */
4707 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4708 conn->auth_type != HCI_AT_NO_BONDING)
4709 conn->auth_type |= 0x01;
4710 } else {
4711 conn->auth_type = hci_get_auth_req(conn);
4712 }
4713
4714 /* If we're not bondable, force one of the non-bondable
4715 * authentication requirement values.
4716 */
4717 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4718 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4719
4720 cp.authentication = conn->auth_type;
4721 cp.oob_data = bredr_oob_data_present(conn);
4722
4723 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4724 sizeof(cp), &cp);
4725 } else {
4726 struct hci_cp_io_capability_neg_reply cp;
4727
4728 bacpy(&cp.bdaddr, &ev->bdaddr);
4729 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4730
4731 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4732 sizeof(cp), &cp);
4733 }
4734
4735 unlock:
4736 hci_dev_unlock(hdev);
4737 }
4738
4739 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4740 {
4741 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4742 struct hci_conn *conn;
4743
4744 BT_DBG("%s", hdev->name);
4745
4746 hci_dev_lock(hdev);
4747
4748 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4749 if (!conn)
4750 goto unlock;
4751
4752 conn->remote_cap = ev->capability;
4753 conn->remote_auth = ev->authentication;
4754
4755 unlock:
4756 hci_dev_unlock(hdev);
4757 }
4758
4759 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4760 struct sk_buff *skb)
4761 {
4762 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4763 int loc_mitm, rem_mitm, confirm_hint = 0;
4764 struct hci_conn *conn;
4765
4766 BT_DBG("%s", hdev->name);
4767
4768 hci_dev_lock(hdev);
4769
4770 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4771 goto unlock;
4772
4773 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4774 if (!conn)
4775 goto unlock;
4776
4777 loc_mitm = (conn->auth_type & 0x01);
4778 rem_mitm = (conn->remote_auth & 0x01);
4779
4780 /* If we require MITM but the remote device can't provide that
4781 * (it has NoInputNoOutput) then reject the confirmation
4782 * request. We check the security level here since it doesn't
4783 * necessarily match conn->auth_type.
4784 */
4785 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4786 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4787 BT_DBG("Rejecting request: remote device can't provide MITM");
4788 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4789 sizeof(ev->bdaddr), &ev->bdaddr);
4790 goto unlock;
4791 }
4792
4793 /* If neither side requires MITM protection, auto-accept */
4794 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4795 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4796
4797 /* If we're not the initiators, request authorization to
4798 * proceed from user space (mgmt_user_confirm with
4799 * confirm_hint set to 1). The exception is if neither
4800 * side had MITM or if the local IO capability is
4801 * NoInputNoOutput, in which case we do auto-accept.
4802 */
4803 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4804 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4805 (loc_mitm || rem_mitm)) {
4806 BT_DBG("Confirming auto-accept as acceptor");
4807 confirm_hint = 1;
4808 goto confirm;
4809 }
4810
4811 /* If a link key already exists on the local host, leave the
4812 * decision to user space since the remote device could be
4813 * legitimate or malicious.
4814 */
4815 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4816 bt_dev_dbg(hdev, "Local host already has link key");
4817 confirm_hint = 1;
4818 goto confirm;
4819 }
4820
4821 BT_DBG("Auto-accept of user confirmation with %ums delay",
4822 hdev->auto_accept_delay);
4823
4824 if (hdev->auto_accept_delay > 0) {
4825 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4826 queue_delayed_work(conn->hdev->workqueue,
4827 &conn->auto_accept_work, delay);
4828 goto unlock;
4829 }
4830
4831 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4832 sizeof(ev->bdaddr), &ev->bdaddr);
4833 goto unlock;
4834 }
4835
4836 confirm:
4837 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4838 le32_to_cpu(ev->passkey), confirm_hint);
4839
4840 unlock:
4841 hci_dev_unlock(hdev);
4842 }
4843
4844 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4845 struct sk_buff *skb)
4846 {
4847 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4848
4849 BT_DBG("%s", hdev->name);
4850
4851 if (hci_dev_test_flag(hdev, HCI_MGMT))
4852 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4853 }
4854
4855 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4856 struct sk_buff *skb)
4857 {
4858 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4859 struct hci_conn *conn;
4860
4861 BT_DBG("%s", hdev->name);
4862
4863 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4864 if (!conn)
4865 return;
4866
4867 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4868 conn->passkey_entered = 0;
4869
4870 if (hci_dev_test_flag(hdev, HCI_MGMT))
4871 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4872 conn->dst_type, conn->passkey_notify,
4873 conn->passkey_entered);
4874 }
4875
4876 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4877 {
4878 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4879 struct hci_conn *conn;
4880
4881 BT_DBG("%s", hdev->name);
4882
4883 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4884 if (!conn)
4885 return;
4886
4887 switch (ev->type) {
4888 case HCI_KEYPRESS_STARTED:
4889 conn->passkey_entered = 0;
4890 return;
4891
4892 case HCI_KEYPRESS_ENTERED:
4893 conn->passkey_entered++;
4894 break;
4895
4896 case HCI_KEYPRESS_ERASED:
4897 conn->passkey_entered--;
4898 break;
4899
4900 case HCI_KEYPRESS_CLEARED:
4901 conn->passkey_entered = 0;
4902 break;
4903
4904 case HCI_KEYPRESS_COMPLETED:
4905 return;
4906 }
4907
4908 if (hci_dev_test_flag(hdev, HCI_MGMT))
4909 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4910 conn->dst_type, conn->passkey_notify,
4911 conn->passkey_entered);
4912 }
4913
4914 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4915 struct sk_buff *skb)
4916 {
4917 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4918 struct hci_conn *conn;
4919
4920 BT_DBG("%s", hdev->name);
4921
4922 hci_dev_lock(hdev);
4923
4924 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4925 if (!conn)
4926 goto unlock;
4927
4928 /* Reset the authentication requirement to unknown */
4929 conn->remote_auth = 0xff;
4930
4931 /* To avoid duplicate auth_failed events to user space we check
4932 * the HCI_CONN_AUTH_PEND flag, which will be set if we
4933 * initiated the authentication. A traditional auth_complete
4934 * event is always produced as initiator and is also mapped to
4935 * the mgmt_auth_failed event */
4936 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4937 mgmt_auth_failed(conn, ev->status);
4938
4939 hci_conn_drop(conn);
4940
4941 unlock:
4942 hci_dev_unlock(hdev);
4943 }
4944
4945 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4946 struct sk_buff *skb)
4947 {
4948 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4949 struct inquiry_entry *ie;
4950 struct hci_conn *conn;
4951
4952 BT_DBG("%s", hdev->name);
4953
4954 hci_dev_lock(hdev);
4955
4956 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4957 if (conn)
4958 memcpy(conn->features[1], ev->features, 8);
4959
4960 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4961 if (ie)
4962 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4963
4964 hci_dev_unlock(hdev);
4965 }
4966
4967 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4968 struct sk_buff *skb)
4969 {
4970 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4971 struct oob_data *data;
4972
4973 BT_DBG("%s", hdev->name);
4974
4975 hci_dev_lock(hdev);
4976
4977 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4978 goto unlock;
4979
4980 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4981 if (!data) {
4982 struct hci_cp_remote_oob_data_neg_reply cp;
4983
4984 bacpy(&cp.bdaddr, &ev->bdaddr);
4985 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4986 sizeof(cp), &cp);
4987 goto unlock;
4988 }
4989
4990 if (bredr_sc_enabled(hdev)) {
4991 struct hci_cp_remote_oob_ext_data_reply cp;
4992
4993 bacpy(&cp.bdaddr, &ev->bdaddr);
4994 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4995 memset(cp.hash192, 0, sizeof(cp.hash192));
4996 memset(cp.rand192, 0, sizeof(cp.rand192));
4997 } else {
4998 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4999 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5000 }
5001 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5002 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5003
5004 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5005 sizeof(cp), &cp);
5006 } else {
5007 struct hci_cp_remote_oob_data_reply cp;
5008
5009 bacpy(&cp.bdaddr, &ev->bdaddr);
5010 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5011 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5012
5013 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5014 sizeof(cp), &cp);
5015 }
5016
5017 unlock:
5018 hci_dev_unlock(hdev);
5019 }
5020
5021 #if IS_ENABLED(CONFIG_BT_HS)
5022 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5023 {
5024 struct hci_ev_channel_selected *ev = (void *)skb->data;
5025 struct hci_conn *hcon;
5026
5027 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5028
5029 skb_pull(skb, sizeof(*ev));
5030
5031 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5032 if (!hcon)
5033 return;
5034
5035 amp_read_loc_assoc_final_data(hdev, hcon);
5036 }
5037
5038 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5039 struct sk_buff *skb)
5040 {
5041 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5042 struct hci_conn *hcon, *bredr_hcon;
5043
5044 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5045 ev->status);
5046
5047 hci_dev_lock(hdev);
5048
5049 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5050 if (!hcon)
5051 goto unlock;
5052
5053 if (!hcon->amp_mgr)
5054 goto unlock;
5055
5056 if (ev->status) {
5057 hci_conn_del(hcon);
5058 goto unlock;
5059 }
5060
5061 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5062
5063 hcon->state = BT_CONNECTED;
5064 bacpy(&hcon->dst, &bredr_hcon->dst);
5065
5066 hci_conn_hold(hcon);
5067 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5068 hci_conn_drop(hcon);
5069
5070 hci_debugfs_create_conn(hcon);
5071 hci_conn_add_sysfs(hcon);
5072
5073 amp_physical_cfm(bredr_hcon, hcon);
5074
5075 unlock:
5076 hci_dev_unlock(hdev);
5077 }
5078
5079 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5080 {
5081 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5082 struct hci_conn *hcon;
5083 struct hci_chan *hchan;
5084 struct amp_mgr *mgr;
5085
5086 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5087 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5088 ev->status);
5089
5090 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5091 if (!hcon)
5092 return;
5093
5094 /* Create AMP hchan */
5095 hchan = hci_chan_create(hcon);
5096 if (!hchan)
5097 return;
5098
5099 hchan->handle = le16_to_cpu(ev->handle);
5100 hchan->amp = true;
5101
5102 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5103
5104 mgr = hcon->amp_mgr;
5105 if (mgr && mgr->bredr_chan) {
5106 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5107
5108 l2cap_chan_lock(bredr_chan);
5109
5110 bredr_chan->conn->mtu = hdev->block_mtu;
5111 l2cap_logical_cfm(bredr_chan, hchan, 0);
5112 hci_conn_hold(hcon);
5113
5114 l2cap_chan_unlock(bredr_chan);
5115 }
5116 }
5117
5118 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5119 struct sk_buff *skb)
5120 {
5121 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5122 struct hci_chan *hchan;
5123
5124 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5125 le16_to_cpu(ev->handle), ev->status);
5126
5127 if (ev->status)
5128 return;
5129
5130 hci_dev_lock(hdev);
5131
5132 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5133 if (!hchan || !hchan->amp)
5134 goto unlock;
5135
5136 amp_destroy_logical_link(hchan, ev->reason);
5137
5138 unlock:
5139 hci_dev_unlock(hdev);
5140 }
5141
5142 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5143 struct sk_buff *skb)
5144 {
5145 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5146 struct hci_conn *hcon;
5147
5148 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5149
5150 if (ev->status)
5151 return;
5152
5153 hci_dev_lock(hdev);
5154
5155 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5156 if (hcon) {
5157 hcon->state = BT_CLOSED;
5158 hci_conn_del(hcon);
5159 }
5160
5161 hci_dev_unlock(hdev);
5162 }
5163 #endif
5164
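/* Record the initiator and responder addresses of a new LE connection
 * based on our role: for outgoing connections the peer is the
 * responder, while for incoming connections the peer is the initiator
 * and we responded with one of our own advertising addresses.
 */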
5165 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5166 u8 bdaddr_type, bdaddr_t *local_rpa)
5167 {
5168 if (conn->out) {
5169 conn->dst_type = bdaddr_type;
5170 conn->resp_addr_type = bdaddr_type;
5171 bacpy(&conn->resp_addr, bdaddr);
5172
5173 /* If the controller has set a Local RPA, then it must be
5174 * used instead of hdev->rpa.
5175 */
5176 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5177 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5178 bacpy(&conn->init_addr, local_rpa);
5179 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5180 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5181 bacpy(&conn->init_addr, &conn->hdev->rpa);
5182 } else {
5183 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5184 &conn->init_addr_type);
5185 }
5186 } else {
5187 conn->resp_addr_type = conn->hdev->adv_addr_type;
5188 /* If the controller has set a Local RPA, then it must be
5189 * used instead of hdev->rpa.
5190 */
5191 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5192 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5193 bacpy(&conn->resp_addr, local_rpa);
5194 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5195 /* In case of ext adv, resp_addr will be updated in
5196 * the Adv Set Terminated event.
5197 */
5198 if (!ext_adv_capable(conn->hdev))
5199 bacpy(&conn->resp_addr,
5200 &conn->hdev->random_addr);
5201 } else {
5202 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5203 }
5204
5205 conn->init_addr_type = bdaddr_type;
5206 bacpy(&conn->init_addr, bdaddr);
5207
5208 /* For incoming connections, set the default minimum
5209 * and maximum connection interval. They will be used
5210 * to check if the parameters are in range and if not
5211 * trigger the connection update procedure.
5212 */
5213 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5214 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5215 }
5216 }
5217
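/* Common handler for the legacy and enhanced LE Connection Complete
 * events. The legacy event carries no Local RPA, so callers pass
 * local_rpa as NULL in that case.
 */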
5218 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5219 bdaddr_t *bdaddr, u8 bdaddr_type,
5220 bdaddr_t *local_rpa, u8 role, u16 handle,
5221 u16 interval, u16 latency,
5222 u16 supervision_timeout)
5223 {
5224 struct hci_conn_params *params;
5225 struct hci_conn *conn;
5226 struct smp_irk *irk;
5227 u8 addr_type;
5228
5229 hci_dev_lock(hdev);
5230
5231 /* All controllers implicitly stop advertising in the event of a
5232 * connection, so ensure that the state bit is cleared.
5233 */
5234 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5235
5236 conn = hci_lookup_le_connect(hdev);
5237 if (!conn) {
5238 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5239 if (!conn) {
5240 bt_dev_err(hdev, "no memory for new connection");
5241 goto unlock;
5242 }
5243
5244 conn->dst_type = bdaddr_type;
5245
5246 /* If we didn't have a hci_conn object previously
5247 * but we're in central role, this must be something
5248 * initiated using an accept list. Since accept list based
5249 * connections are not "first class citizens" we don't
5250 * have full tracking of them. Therefore, we go ahead
5251 * with a "best effort" approach of determining the
5252 * initiator address based on the HCI_PRIVACY flag.
5253 */
5254 if (conn->out) {
5255 conn->resp_addr_type = bdaddr_type;
5256 bacpy(&conn->resp_addr, bdaddr);
5257 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5258 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5259 bacpy(&conn->init_addr, &hdev->rpa);
5260 } else {
5261 hci_copy_identity_address(hdev,
5262 &conn->init_addr,
5263 &conn->init_addr_type);
5264 }
5265 }
5266 } else {
5267 cancel_delayed_work(&conn->le_conn_timeout);
5268 }
5269
5270 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5271
5272 /* Lookup the identity address from the stored connection
5273 * address and address type.
5274 *
5275 * When establishing connections to an identity address, the
5276 * connection procedure will store the resolvable random
5277 * address first. Now if it can be converted back into the
5278 * identity address, start using the identity address from
5279 * now on.
5280 */
5281 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5282 if (irk) {
5283 bacpy(&conn->dst, &irk->bdaddr);
5284 conn->dst_type = irk->addr_type;
5285 }
5286
5287 /* When using controller-based address resolution, the new
5288 * address types 0x02 and 0x03 are used. These types need to be
5289 * converted back into either public or random address type.
5290 */
5291 if (use_ll_privacy(hdev) &&
5292 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5293 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5294 switch (conn->dst_type) {
5295 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5296 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5297 break;
5298 case ADDR_LE_DEV_RANDOM_RESOLVED:
5299 conn->dst_type = ADDR_LE_DEV_RANDOM;
5300 break;
5301 }
5302 }
5303
5304 if (status) {
5305 hci_le_conn_failed(conn, status);
5306 goto unlock;
5307 }
5308
5309 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5310 addr_type = BDADDR_LE_PUBLIC;
5311 else
5312 addr_type = BDADDR_LE_RANDOM;
5313
5314 /* Drop the connection if the device is blocked */
5315 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5316 hci_conn_drop(conn);
5317 goto unlock;
5318 }
5319
5320 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5321 mgmt_device_connected(hdev, conn, NULL, 0);
5322
5323 conn->sec_level = BT_SECURITY_LOW;
5324 conn->handle = handle;
5325 conn->state = BT_CONFIG;
5326
5327 /* Store the current advertising instance as the connection's
5328 * advertising instance when software rotation is in use so it
5329 * can be re-enabled when disconnected.
5330 */
5331 if (!ext_adv_capable(hdev))
5332 conn->adv_instance = hdev->cur_adv_instance;
5333
5334 conn->le_conn_interval = interval;
5335 conn->le_conn_latency = latency;
5336 conn->le_supv_timeout = supervision_timeout;
5337
5338 hci_debugfs_create_conn(conn);
5339 hci_conn_add_sysfs(conn);
5340
5341 /* The remote features procedure is defined for central
5342 * role only. So the remote features are only requested
5343 * for an initiated connection.
5344 *
5345 * If the local controller supports peripheral-initiated features
5346 * exchange, then requesting the remote features in peripheral
5347 * role is possible. Otherwise just transition into the
5348 * connected state without requesting the remote features.
5349 */
5350 if (conn->out ||
5351 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5352 struct hci_cp_le_read_remote_features cp;
5353
5354 cp.handle = __cpu_to_le16(conn->handle);
5355
5356 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5357 sizeof(cp), &cp);
5358
5359 hci_conn_hold(conn);
5360 } else {
5361 conn->state = BT_CONNECTED;
5362 hci_connect_cfm(conn, status);
5363 }
5364
5365 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5366 conn->dst_type);
5367 if (params) {
5368 list_del_init(&params->action);
5369 if (params->conn) {
5370 hci_conn_drop(params->conn);
5371 hci_conn_put(params->conn);
5372 params->conn = NULL;
5373 }
5374 }
5375
5376 unlock:
5377 hci_update_background_scan(hdev);
5378 hci_dev_unlock(hdev);
5379 }
5380
5381 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5382 {
5383 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5384
5385 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5386
5387 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5388 NULL, ev->role, le16_to_cpu(ev->handle),
5389 le16_to_cpu(ev->interval),
5390 le16_to_cpu(ev->latency),
5391 le16_to_cpu(ev->supervision_timeout));
5392 }
5393
5394 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5395 struct sk_buff *skb)
5396 {
5397 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5398
5399 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5400
5401 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5402 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5403 le16_to_cpu(ev->interval),
5404 le16_to_cpu(ev->latency),
5405 le16_to_cpu(ev->supervision_timeout));
5406
5407 if (use_ll_privacy(hdev) &&
5408 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5409 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5410 hci_req_disable_address_resolution(hdev);
5411 }
5412
5413 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5414 {
5415 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5416 struct hci_conn *conn;
5417 struct adv_info *adv;
5418
5419 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5420
5421 adv = hci_find_adv_instance(hdev, ev->handle);
5422
5423 if (ev->status) {
5424 if (!adv)
5425 return;
5426
5427 /* Remove advertising as it has been terminated */
5428 hci_remove_adv_instance(hdev, ev->handle);
5429 mgmt_advertising_removed(NULL, hdev, ev->handle);
5430
5431 return;
5432 }
5433
5434 if (adv)
5435 adv->enabled = false;
5436
5437 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5438 if (conn) {
5439 /* Store handle in the connection so the correct advertising
5440 * instance can be re-enabled when disconnected.
5441 */
5442 conn->adv_instance = ev->handle;
5443
5444 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5445 bacmp(&conn->resp_addr, BDADDR_ANY))
5446 return;
5447
5448 if (!ev->handle) {
5449 bacpy(&conn->resp_addr, &hdev->random_addr);
5450 return;
5451 }
5452
5453 if (adv)
5454 bacpy(&conn->resp_addr, &adv->random_addr);
5455 }
5456 }
5457
5458 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5459 struct sk_buff *skb)
5460 {
5461 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5462 struct hci_conn *conn;
5463
5464 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5465
5466 if (ev->status)
5467 return;
5468
5469 hci_dev_lock(hdev);
5470
5471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5472 if (conn) {
5473 conn->le_conn_interval = le16_to_cpu(ev->interval);
5474 conn->le_conn_latency = le16_to_cpu(ev->latency);
5475 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5476 }
5477
5478 hci_dev_unlock(hdev);
5479 }
5480
5481 /* This function requires the caller holds hdev->lock */
5482 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5483 bdaddr_t *addr,
5484 u8 addr_type, u8 adv_type,
5485 bdaddr_t *direct_rpa)
5486 {
5487 struct hci_conn *conn;
5488 struct hci_conn_params *params;
5489
5490 /* If the event is not connectable don't proceed further */
5491 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5492 return NULL;
5493
5494 /* Ignore if the device is blocked */
5495 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5496 return NULL;
5497
5498 /* Most controllers will fail if we try to create new connections
5499 * while we have an existing one in peripheral role.
5500 */
5501 if (hdev->conn_hash.le_num_peripheral > 0 &&
5502 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5503 !(hdev->le_states[3] & 0x10)))
5504 return NULL;
5505
5506 /* If we're not connectable, only connect devices that we have in
5507 * our pend_le_conns list.
5508 */
5509 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5510 addr_type);
5511 if (!params)
5512 return NULL;
5513
5514 if (!params->explicit_connect) {
5515 switch (params->auto_connect) {
5516 case HCI_AUTO_CONN_DIRECT:
5517 /* Only devices advertising with ADV_DIRECT_IND
5518 * trigger a connection attempt. This allows
5519 * incoming connections from peripheral devices.
5520 */
5521 if (adv_type != LE_ADV_DIRECT_IND)
5522 return NULL;
5523 break;
5524 case HCI_AUTO_CONN_ALWAYS:
5525 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5526 * trigger a connection attempt. This means
5527 * that incoming connections from peripheral devices are
5528 * accepted and also outgoing connections to peripheral
5529 * devices are established when found.
5530 */
5531 break;
5532 default:
5533 return NULL;
5534 }
5535 }
5536
5537 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5538 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5539 direct_rpa);
5540 if (!IS_ERR(conn)) {
5541 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5542 * by the higher layer that tried to connect; if not, then
5543 * store the pointer since we don't really have any
5544 * other owner of the object besides the params that
5545 * triggered it. This way we can abort the connection if
5546 * the parameters get removed and keep the reference
5547 * count consistent once the connection is established.
5548 */
5549
5550 if (!params->explicit_connect)
5551 params->conn = hci_conn_get(conn);
5552
5553 return conn;
5554 }
5555
5556 switch (PTR_ERR(conn)) {
5557 case -EBUSY:
5558 /* If hci_connect() returns -EBUSY it means there is already
5559 * an LE connection attempt going on. Since controllers don't
5560 * support more than one connection attempt at a time, we
5561 * don't consider this an error case.
5562 */
5563 break;
5564 default:
5565 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5566 return NULL;
5567 }
5568
5569 return NULL;
5570 }
5571
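/* Common handler for legacy, extended and direct advertising reports.
 * direct_addr is only set for LE Direct Advertising Reports, and
 * ext_adv marks extended PDUs, which may legitimately carry more than
 * the 31 bytes of data allowed for legacy advertising.
 */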
5572 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5573 u8 bdaddr_type, bdaddr_t *direct_addr,
5574 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5575 bool ext_adv)
5576 {
5577 struct discovery_state *d = &hdev->discovery;
5578 struct smp_irk *irk;
5579 struct hci_conn *conn;
5580 bool match;
5581 u32 flags;
5582 u8 *ptr;
5583
5584 switch (type) {
5585 case LE_ADV_IND:
5586 case LE_ADV_DIRECT_IND:
5587 case LE_ADV_SCAN_IND:
5588 case LE_ADV_NONCONN_IND:
5589 case LE_ADV_SCAN_RSP:
5590 break;
5591 default:
5592 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5593 "type: 0x%02x", type);
5594 return;
5595 }
5596
5597 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5598 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5599 return;
5600 }
5601
5602 /* Find the end of the data in case the report contains padded zero
5603 * bytes at the end causing an invalid length value.
5604 *
5605 * When data is NULL, len is 0 so there is no need for extra ptr
5606 * check as 'ptr < data + 0' is already false in such case.
5607 */
5608 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5609 if (ptr + 1 + *ptr > data + len)
5610 break;
5611 }
5612
5613 /* Adjust for actual length. This handles the case when the
5614 * remote device is advertising with an incorrect data length.
5615 */
5616 len = ptr - data;
5617
5618 /* If the direct address is present, then this report is from
5619 * a LE Direct Advertising Report event. In that case it is
5620 * important to see if the address is matching the local
5621 * controller address.
5622 */
5623 if (direct_addr) {
5624 /* Only resolvable random addresses are valid for this
5625 * kind of report; others can be ignored.
5626 */
5627 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5628 return;
5629
5630 /* If the controller is not using resolvable random
5631 * addresses, then this report can be ignored.
5632 */
5633 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5634 return;
5635
5636 /* If the local IRK of the controller does not match
5637 * with the resolvable random address provided, then
5638 * this report can be ignored.
5639 */
5640 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5641 return;
5642 }
5643
5644 /* Check if we need to convert to identity address */
5645 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5646 if (irk) {
5647 bdaddr = &irk->bdaddr;
5648 bdaddr_type = irk->addr_type;
5649 }
5650
5651 /* Check if we have been requested to connect to this device.
5652 *
5653 * direct_addr is set only for directed advertising reports (it is
5654 * NULL for regular advertising reports) and was verified to be an RPA above.
5655 */
5656 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5657 direct_addr);
5658 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5659 /* Store report for later inclusion by
5660 * mgmt_device_connected
5661 */
5662 memcpy(conn->le_adv_data, data, len);
5663 conn->le_adv_data_len = len;
5664 }
5665
5666 /* Passive scanning shouldn't trigger any device found events,
5667 * except for devices marked as CONN_REPORT, for which we do send
5668 * device found events, or when advertisement monitoring is requested.
5669 */
5670 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5671 if (type == LE_ADV_DIRECT_IND)
5672 return;
5673
5674 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5675 bdaddr, bdaddr_type) &&
5676 idr_is_empty(&hdev->adv_monitors_idr))
5677 return;
5678
5679 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5680 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5681 else
5682 flags = 0;
5683 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5684 rssi, flags, data, len, NULL, 0);
5685 return;
5686 }
5687
5688 /* When receiving non-connectable or scannable undirected
5689 * advertising reports, the remote device is clearly
5690 * not connectable, so indicate this in the
5691 * device found event.
5692 *
5693 * When receiving a scan response, there is no way to
5694 * know if the remote device is connectable or not. However,
5695 * since scan responses are merged with a previously seen
5696 * advertising report, the flags field from that report
5697 * will be used.
5698 *
5699 * In the really unlikely case that a controller gets confused
5700 * and just sends a scan response event, then it is marked as
5701 * not connectable as well.
5702 */
5703 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5704 type == LE_ADV_SCAN_RSP)
5705 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5706 else
5707 flags = 0;
5708
5709 /* If there's nothing pending, either store the data from this
5710 * event or send an immediate device found event if the data
5711 * should not be stored for later.
5712 */
5713 if (!ext_adv && !has_pending_adv_report(hdev)) {
5714 /* If the report will trigger a SCAN_REQ, store it for
5715 * later merging.
5716 */
5717 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5718 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5719 rssi, flags, data, len);
5720 return;
5721 }
5722
5723 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5724 rssi, flags, data, len, NULL, 0);
5725 return;
5726 }
5727
5728 /* Check if the pending report is for the same device as the new one */
5729 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5730 bdaddr_type == d->last_adv_addr_type);
5731
5732 /* If the pending data doesn't match this report or this isn't a
5733 * scan response (e.g. we got a duplicate ADV_IND) then force
5734 * sending of the pending data.
5735 */
5736 if (type != LE_ADV_SCAN_RSP || !match) {
5737 /* Send out whatever is in the cache, but skip duplicates */
5738 if (!match)
5739 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5740 d->last_adv_addr_type, NULL,
5741 d->last_adv_rssi, d->last_adv_flags,
5742 d->last_adv_data,
5743 d->last_adv_data_len, NULL, 0);
5744
5745 /* If the new report will trigger a SCAN_REQ, store it for
5746 * later merging.
5747 */
5748 if (!ext_adv && (type == LE_ADV_IND ||
5749 type == LE_ADV_SCAN_IND)) {
5750 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5751 rssi, flags, data, len);
5752 return;
5753 }
5754
5755 /* The advertising reports cannot be merged, so clear
5756 * the pending report and send out a device found event.
5757 */
5758 clear_pending_adv_report(hdev);
5759 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5760 rssi, flags, data, len, NULL, 0);
5761 return;
5762 }
5763
5764 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5765 * the new event is a SCAN_RSP. We can therefore proceed with
5766 * sending a merged device found event.
5767 */
5768 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5769 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5770 d->last_adv_data, d->last_adv_data_len, data, len);
5771 clear_pending_adv_report(hdev);
5772 }
5773
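/* In the legacy LE Advertising Report event every report is followed
 * by a single RSSI byte, hence the extra byte read after ev->data and
 * the "+ 1" when advancing to the next report.
 */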
5774 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5775 {
5776 u8 num_reports = skb->data[0];
5777 void *ptr = &skb->data[1];
5778
5779 hci_dev_lock(hdev);
5780
5781 while (num_reports--) {
5782 struct hci_ev_le_advertising_info *ev = ptr;
5783 s8 rssi;
5784
5785 if (ev->length <= HCI_MAX_AD_LENGTH &&
5786 ev->data + ev->length <= skb_tail_pointer(skb)) {
5787 rssi = ev->data[ev->length];
5788 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5789 ev->bdaddr_type, NULL, 0, rssi,
5790 ev->data, ev->length, false);
5791 } else {
5792 bt_dev_err(hdev, "Dropping invalid advertising data");
5793 }
5794
5795 ptr += sizeof(*ev) + ev->length + 1;
5796
5797 if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
5798 bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
5799 break;
5800 }
5801 }
5802
5803 hci_dev_unlock(hdev);
5804 }
5805
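/* Map an extended advertising event type onto the legacy PDU type that
 * process_adv_report() understands. When the legacy PDU bit is set the
 * value identifies one specific legacy PDU, while for extended PDUs
 * the connectable, scannable and directed properties are individual
 * flag bits.
 */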
5806 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5807 {
5808 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5809 switch (evt_type) {
5810 case LE_LEGACY_ADV_IND:
5811 return LE_ADV_IND;
5812 case LE_LEGACY_ADV_DIRECT_IND:
5813 return LE_ADV_DIRECT_IND;
5814 case LE_LEGACY_ADV_SCAN_IND:
5815 return LE_ADV_SCAN_IND;
5816 case LE_LEGACY_NONCONN_IND:
5817 return LE_ADV_NONCONN_IND;
5818 case LE_LEGACY_SCAN_RSP_ADV:
5819 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5820 return LE_ADV_SCAN_RSP;
5821 }
5822
5823 goto invalid;
5824 }
5825
5826 if (evt_type & LE_EXT_ADV_CONN_IND) {
5827 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5828 return LE_ADV_DIRECT_IND;
5829
5830 return LE_ADV_IND;
5831 }
5832
5833 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5834 return LE_ADV_SCAN_RSP;
5835
5836 if (evt_type & LE_EXT_ADV_SCAN_IND)
5837 return LE_ADV_SCAN_IND;
5838
5839 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5840 evt_type & LE_EXT_ADV_DIRECT_IND)
5841 return LE_ADV_NONCONN_IND;
5842
5843 invalid:
5844 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5845 evt_type);
5846
5847 return LE_ADV_INVALID;
5848 }
5849
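/* Unlike the legacy advertising report, the extended report carries
 * the RSSI inside the report structure itself, so there is no trailing
 * RSSI byte to skip when advancing to the next report.
 */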
5850 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5851 {
5852 u8 num_reports = skb->data[0];
5853 void *ptr = &skb->data[1];
5854
5855 hci_dev_lock(hdev);
5856
5857 while (num_reports--) {
5858 struct hci_ev_le_ext_adv_report *ev = ptr;
5859 u8 legacy_evt_type;
5860 u16 evt_type;
5861
5862 evt_type = __le16_to_cpu(ev->evt_type);
5863 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
5864 if (legacy_evt_type != LE_ADV_INVALID) {
5865 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5866 ev->bdaddr_type, NULL, 0, ev->rssi,
5867 ev->data, ev->length,
5868 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5869 }
5870
5871 ptr += sizeof(*ev) + ev->length;
5872 }
5873
5874 hci_dev_unlock(hdev);
5875 }
5876
5877 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5878 struct sk_buff *skb)
5879 {
5880 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5881 struct hci_conn *conn;
5882
5883 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5884
5885 hci_dev_lock(hdev);
5886
5887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5888 if (conn) {
5889 if (!ev->status)
5890 memcpy(conn->features[0], ev->features, 8);
5891
5892 if (conn->state == BT_CONFIG) {
5893 __u8 status;
5894
5895 /* If the local controller supports peripheral-initiated
5896 * features exchange, but the remote controller does
5897 * not, then it is possible that the error code 0x1a
5898 * for unsupported remote feature gets returned.
5899 *
5900 * In this specific case, allow the connection to
5901 * transition into connected state and mark it as
5902 * successful.
5903 */
5904 if (!conn->out && ev->status == 0x1a &&
5905 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
5906 status = 0x00;
5907 else
5908 status = ev->status;
5909
5910 conn->state = BT_CONNECTED;
5911 hci_connect_cfm(conn, status);
5912 hci_conn_drop(conn);
5913 }
5914 }
5915
5916 hci_dev_unlock(hdev);
5917 }
5918
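/* Look up the LTK for the connection the controller is asking about
 * and hand it back. The key is copied at its negotiated size and the
 * remainder of the fixed 16 byte HCI parameter is zero padded, so a
 * shortened encryption key still fills the whole field.
 */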
5919 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5920 {
5921 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5922 struct hci_cp_le_ltk_reply cp;
5923 struct hci_cp_le_ltk_neg_reply neg;
5924 struct hci_conn *conn;
5925 struct smp_ltk *ltk;
5926
5927 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5928
5929 hci_dev_lock(hdev);
5930
5931 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5932 if (conn == NULL)
5933 goto not_found;
5934
5935 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5936 if (!ltk)
5937 goto not_found;
5938
5939 if (smp_ltk_is_sc(ltk)) {
5940 /* With SC both EDiv and Rand are set to zero */
5941 if (ev->ediv || ev->rand)
5942 goto not_found;
5943 } else {
5944 /* For non-SC keys check that EDiv and Rand match */
5945 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5946 goto not_found;
5947 }
5948
5949 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5950 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5951 cp.handle = cpu_to_le16(conn->handle);
5952
5953 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5954
5955 conn->enc_key_size = ltk->enc_size;
5956
5957 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5958
5959 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5960 * temporary key used to encrypt a connection following
5961 * pairing. It is used during the Encrypted Session Setup to
5962 * distribute the keys. Later, security can be re-established
5963 * using a distributed LTK.
5964 */
5965 if (ltk->type == SMP_STK) {
5966 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5967 list_del_rcu(&ltk->list);
5968 kfree_rcu(ltk, rcu);
5969 } else {
5970 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5971 }
5972
5973 hci_dev_unlock(hdev);
5974
5975 return;
5976
5977 not_found:
5978 neg.handle = ev->handle;
5979 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5980 hci_dev_unlock(hdev);
5981 }
5982
5983 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5984 u8 reason)
5985 {
5986 struct hci_cp_le_conn_param_req_neg_reply cp;
5987
5988 cp.handle = cpu_to_le16(handle);
5989 cp.reason = reason;
5990
5991 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5992 &cp);
5993 }
5994
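/* Handle an LE Remote Connection Parameter Request: reject requests
 * for unknown handles or out-of-range parameters, notify user space of
 * the new parameters when we are central, and accept the request via
 * the reply command.
 */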
5995 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5996 struct sk_buff *skb)
5997 {
5998 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5999 struct hci_cp_le_conn_param_req_reply cp;
6000 struct hci_conn *hcon;
6001 u16 handle, min, max, latency, timeout;
6002
6003 handle = le16_to_cpu(ev->handle);
6004 min = le16_to_cpu(ev->interval_min);
6005 max = le16_to_cpu(ev->interval_max);
6006 latency = le16_to_cpu(ev->latency);
6007 timeout = le16_to_cpu(ev->timeout);
6008
6009 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6010 if (!hcon || hcon->state != BT_CONNECTED)
6011 return send_conn_param_neg_reply(hdev, handle,
6012 HCI_ERROR_UNKNOWN_CONN_ID);
6013
6014 if (hci_check_conn_params(min, max, latency, timeout))
6015 return send_conn_param_neg_reply(hdev, handle,
6016 HCI_ERROR_INVALID_LL_PARAMS);
6017
6018 if (hcon->role == HCI_ROLE_MASTER) {
6019 struct hci_conn_params *params;
6020 u8 store_hint;
6021
6022 hci_dev_lock(hdev);
6023
6024 params = hci_conn_params_lookup(hdev, &hcon->dst,
6025 hcon->dst_type);
6026 if (params) {
6027 params->conn_min_interval = min;
6028 params->conn_max_interval = max;
6029 params->conn_latency = latency;
6030 params->supervision_timeout = timeout;
6031 store_hint = 0x01;
6032 } else {
6033 store_hint = 0x00;
6034 }
6035
6036 hci_dev_unlock(hdev);
6037
6038 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6039 store_hint, min, max, latency, timeout);
6040 }
6041
6042 cp.handle = ev->handle;
6043 cp.interval_min = ev->interval_min;
6044 cp.interval_max = ev->interval_max;
6045 cp.latency = ev->latency;
6046 cp.timeout = ev->timeout;
6047 cp.min_ce_len = 0;
6048 cp.max_ce_len = 0;
6049
6050 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6051 }
6052
6053 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6054 struct sk_buff *skb)
6055 {
6056 u8 num_reports = skb->data[0];
6057 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6058
6059 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6060 return;
6061
6062 hci_dev_lock(hdev);
6063
6064 for (; num_reports; num_reports--, ev++)
6065 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6066 ev->bdaddr_type, &ev->direct_addr,
6067 ev->direct_addr_type, ev->rssi, NULL, 0,
6068 false);
6069
6070 hci_dev_unlock(hdev);
6071 }
6072
6073 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6074 {
6075 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6076 struct hci_conn *conn;
6077
6078 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6079
6080 if (ev->status)
6081 return;
6082
6083 hci_dev_lock(hdev);
6084
6085 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6086 if (!conn)
6087 goto unlock;
6088
6089 conn->le_tx_phy = ev->tx_phy;
6090 conn->le_rx_phy = ev->rx_phy;
6091
6092 unlock:
6093 hci_dev_unlock(hdev);
6094 }
6095
6096 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6097 {
6098 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6099
6100 skb_pull(skb, sizeof(*le_ev));
6101
6102 switch (le_ev->subevent) {
6103 case HCI_EV_LE_CONN_COMPLETE:
6104 hci_le_conn_complete_evt(hdev, skb);
6105 break;
6106
6107 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6108 hci_le_conn_update_complete_evt(hdev, skb);
6109 break;
6110
6111 case HCI_EV_LE_ADVERTISING_REPORT:
6112 hci_le_adv_report_evt(hdev, skb);
6113 break;
6114
6115 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6116 hci_le_remote_feat_complete_evt(hdev, skb);
6117 break;
6118
6119 case HCI_EV_LE_LTK_REQ:
6120 hci_le_ltk_request_evt(hdev, skb);
6121 break;
6122
6123 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6124 hci_le_remote_conn_param_req_evt(hdev, skb);
6125 break;
6126
6127 case HCI_EV_LE_DIRECT_ADV_REPORT:
6128 hci_le_direct_adv_report_evt(hdev, skb);
6129 break;
6130
6131 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6132 hci_le_phy_update_evt(hdev, skb);
6133 break;
6134
6135 case HCI_EV_LE_EXT_ADV_REPORT:
6136 hci_le_ext_adv_report_evt(hdev, skb);
6137 break;
6138
6139 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6140 hci_le_enh_conn_complete_evt(hdev, skb);
6141 break;
6142
6143 case HCI_EV_LE_EXT_ADV_SET_TERM:
6144 hci_le_ext_adv_term_evt(hdev, skb);
6145 break;
6146
6147 default:
6148 break;
6149 }
6150 }
6151
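/* Check whether @skb is the event that completes the current request:
 * either the explicitly expected @event, or a Command Complete for
 * @opcode. On success skb->data is left pointing at the return
 * parameters.
 */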
6152 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6153 u8 event, struct sk_buff *skb)
6154 {
6155 struct hci_ev_cmd_complete *ev;
6156 struct hci_event_hdr *hdr;
6157
6158 if (!skb)
6159 return false;
6160
6161 if (skb->len < sizeof(*hdr)) {
6162 bt_dev_err(hdev, "too short HCI event");
6163 return false;
6164 }
6165
6166 hdr = (void *) skb->data;
6167 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6168
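/* Requests may ask to be completed by a specific event instead of
 * Command Complete; in that case only the event code needs to match.
 */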
6169 if (event) {
6170 if (hdr->evt != event)
6171 return false;
6172 return true;
6173 }
6174
6175 /* Check if request ended in Command Status - no way to retrieve
6176 * any extra parameters in this case.
6177 */
6178 if (hdr->evt == HCI_EV_CMD_STATUS)
6179 return false;
6180
6181 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6182 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6183 hdr->evt);
6184 return false;
6185 }
6186
6187 if (skb->len < sizeof(*ev)) {
6188 bt_dev_err(hdev, "too short cmd_complete event");
6189 return false;
6190 }
6191
6192 ev = (void *) skb->data;
6193 skb_pull(skb, sizeof(*ev));
6194
6195 if (opcode != __le16_to_cpu(ev->opcode)) {
6196 BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
6197 __le16_to_cpu(ev->opcode));
6198 return false;
6199 }
6200
6201 return true;
6202 }
6203
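/* If this is the first event seen while suspended, record what (and,
 * where possible, which peer) woke us so it can be reported over mgmt.
 */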
6204 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6205 struct sk_buff *skb)
6206 {
6207 struct hci_ev_le_advertising_info *adv;
6208 struct hci_ev_le_direct_adv_info *direct_adv;
6209 struct hci_ev_le_ext_adv_report *ext_adv;
6210 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6211 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6212
6213 hci_dev_lock(hdev);
6214
6215 /* If we are currently suspended and this is the first BT event seen,
6216 * save the wake reason associated with the event.
6217 */
6218 if (!hdev->suspended || hdev->wake_reason)
6219 goto unlock;
6220
6221 /* Default to remote wake. Values for wake_reason are documented in the
6222 * BlueZ mgmt API docs.
6223 */
6224 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6225
6226 /* Once configured for remote wakeup, we should only wake up for
6227 * reconnections. It's useful to see which device is waking us up so
6228 * keep track of the bdaddr of the connection event that woke us up.
6229 */
6230 if (event == HCI_EV_CONN_REQUEST) {
6231 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6232 hdev->wake_addr_type = BDADDR_BREDR;
6233 } else if (event == HCI_EV_CONN_COMPLETE) {
6234 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6235 hdev->wake_addr_type = BDADDR_BREDR;
6236 } else if (event == HCI_EV_LE_META) {
6237 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6238 u8 subevent = le_ev->subevent;
6239 u8 *ptr = &skb->data[sizeof(*le_ev)];
6240 u8 num_reports = *ptr;
6241
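/* For advertising-based wakeups, peek at the first report to pull
 * out the advertiser's address; the three report formats differ, so
 * cast the payload to the layout matching the subevent.
 */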
6242 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6243 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6244 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6245 num_reports) {
6246 adv = (void *)(ptr + 1);
6247 direct_adv = (void *)(ptr + 1);
6248 ext_adv = (void *)(ptr + 1);
6249
6250 switch (subevent) {
6251 case HCI_EV_LE_ADVERTISING_REPORT:
6252 bacpy(&hdev->wake_addr, &adv->bdaddr);
6253 hdev->wake_addr_type = adv->bdaddr_type;
6254 break;
6255 case HCI_EV_LE_DIRECT_ADV_REPORT:
6256 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6257 hdev->wake_addr_type = direct_adv->bdaddr_type;
6258 break;
6259 case HCI_EV_LE_EXT_ADV_REPORT:
6260 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6261 hdev->wake_addr_type = ext_adv->bdaddr_type;
6262 break;
6263 }
6264 }
6265 } else {
6266 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6267 }
6268
6269 unlock:
6270 hci_dev_unlock(hdev);
6271 }
6272
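/* Entry point for all HCI events coming up from the driver: dispatch
 * the event to its handler and, if it completes the command or request
 * currently in flight, invoke the completion callback.
 */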
6273 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6274 {
6275 struct hci_event_hdr *hdr = (void *) skb->data;
6276 hci_req_complete_t req_complete = NULL;
6277 hci_req_complete_skb_t req_complete_skb = NULL;
6278 struct sk_buff *orig_skb = NULL;
6279 u8 status = 0, event = hdr->evt, req_evt = 0;
6280 u16 opcode = HCI_OP_NOP;
6281
6282 if (!event) {
6283 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6284 goto done;
6285 }
6286
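/* The pending command may have asked to be completed by this very
 * event rather than by Command Status/Complete; if so, look up its
 * completion callbacks now.
 */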
6287 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6288 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6289 opcode = __le16_to_cpu(cmd_hdr->opcode);
6290 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6291 &req_complete_skb);
6292 req_evt = event;
6293 }
6294
6295 /* If it looks like we might end up having to call
6296 * req_complete_skb, store a pristine copy of the skb since the
6297 * various handlers may modify the original one through
6298 * skb_pull() calls, etc.
6299 */
6300 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6301 event == HCI_EV_CMD_COMPLETE)
6302 orig_skb = skb_clone(skb, GFP_KERNEL);
6303
6304 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6305
6306 /* Store wake reason if we're suspended */
6307 hci_store_wake_reason(hdev, event, skb);
6308
6309 switch (event) {
6310 case HCI_EV_INQUIRY_COMPLETE:
6311 hci_inquiry_complete_evt(hdev, skb);
6312 break;
6313
6314 case HCI_EV_INQUIRY_RESULT:
6315 hci_inquiry_result_evt(hdev, skb);
6316 break;
6317
6318 case HCI_EV_CONN_COMPLETE:
6319 hci_conn_complete_evt(hdev, skb);
6320 break;
6321
6322 case HCI_EV_CONN_REQUEST:
6323 hci_conn_request_evt(hdev, skb);
6324 break;
6325
6326 case HCI_EV_DISCONN_COMPLETE:
6327 hci_disconn_complete_evt(hdev, skb);
6328 break;
6329
6330 case HCI_EV_AUTH_COMPLETE:
6331 hci_auth_complete_evt(hdev, skb);
6332 break;
6333
6334 case HCI_EV_REMOTE_NAME:
6335 hci_remote_name_evt(hdev, skb);
6336 break;
6337
6338 case HCI_EV_ENCRYPT_CHANGE:
6339 hci_encrypt_change_evt(hdev, skb);
6340 break;
6341
6342 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6343 hci_change_link_key_complete_evt(hdev, skb);
6344 break;
6345
6346 case HCI_EV_REMOTE_FEATURES:
6347 hci_remote_features_evt(hdev, skb);
6348 break;
6349
6350 case HCI_EV_CMD_COMPLETE:
6351 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6352 &req_complete, &req_complete_skb);
6353 break;
6354
6355 case HCI_EV_CMD_STATUS:
6356 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6357 &req_complete_skb);
6358 break;
6359
6360 case HCI_EV_HARDWARE_ERROR:
6361 hci_hardware_error_evt(hdev, skb);
6362 break;
6363
6364 case HCI_EV_ROLE_CHANGE:
6365 hci_role_change_evt(hdev, skb);
6366 break;
6367
6368 case HCI_EV_NUM_COMP_PKTS:
6369 hci_num_comp_pkts_evt(hdev, skb);
6370 break;
6371
6372 case HCI_EV_MODE_CHANGE:
6373 hci_mode_change_evt(hdev, skb);
6374 break;
6375
6376 case HCI_EV_PIN_CODE_REQ:
6377 hci_pin_code_request_evt(hdev, skb);
6378 break;
6379
6380 case HCI_EV_LINK_KEY_REQ:
6381 hci_link_key_request_evt(hdev, skb);
6382 break;
6383
6384 case HCI_EV_LINK_KEY_NOTIFY:
6385 hci_link_key_notify_evt(hdev, skb);
6386 break;
6387
6388 case HCI_EV_CLOCK_OFFSET:
6389 hci_clock_offset_evt(hdev, skb);
6390 break;
6391
6392 case HCI_EV_PKT_TYPE_CHANGE:
6393 hci_pkt_type_change_evt(hdev, skb);
6394 break;
6395
6396 case HCI_EV_PSCAN_REP_MODE:
6397 hci_pscan_rep_mode_evt(hdev, skb);
6398 break;
6399
6400 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6401 hci_inquiry_result_with_rssi_evt(hdev, skb);
6402 break;
6403
6404 case HCI_EV_REMOTE_EXT_FEATURES:
6405 hci_remote_ext_features_evt(hdev, skb);
6406 break;
6407
6408 case HCI_EV_SYNC_CONN_COMPLETE:
6409 hci_sync_conn_complete_evt(hdev, skb);
6410 break;
6411
6412 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6413 hci_extended_inquiry_result_evt(hdev, skb);
6414 break;
6415
6416 case HCI_EV_KEY_REFRESH_COMPLETE:
6417 hci_key_refresh_complete_evt(hdev, skb);
6418 break;
6419
6420 case HCI_EV_IO_CAPA_REQUEST:
6421 hci_io_capa_request_evt(hdev, skb);
6422 break;
6423
6424 case HCI_EV_IO_CAPA_REPLY:
6425 hci_io_capa_reply_evt(hdev, skb);
6426 break;
6427
6428 case HCI_EV_USER_CONFIRM_REQUEST:
6429 hci_user_confirm_request_evt(hdev, skb);
6430 break;
6431
6432 case HCI_EV_USER_PASSKEY_REQUEST:
6433 hci_user_passkey_request_evt(hdev, skb);
6434 break;
6435
6436 case HCI_EV_USER_PASSKEY_NOTIFY:
6437 hci_user_passkey_notify_evt(hdev, skb);
6438 break;
6439
6440 case HCI_EV_KEYPRESS_NOTIFY:
6441 hci_keypress_notify_evt(hdev, skb);
6442 break;
6443
6444 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6445 hci_simple_pair_complete_evt(hdev, skb);
6446 break;
6447
6448 case HCI_EV_REMOTE_HOST_FEATURES:
6449 hci_remote_host_features_evt(hdev, skb);
6450 break;
6451
6452 case HCI_EV_LE_META:
6453 hci_le_meta_evt(hdev, skb);
6454 break;
6455
6456 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6457 hci_remote_oob_data_request_evt(hdev, skb);
6458 break;
6459
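/* AMP (High Speed) link events, compiled in only with CONFIG_BT_HS. */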
6460 #if IS_ENABLED(CONFIG_BT_HS)
6461 case HCI_EV_CHANNEL_SELECTED:
6462 hci_chan_selected_evt(hdev, skb);
6463 break;
6464
6465 case HCI_EV_PHY_LINK_COMPLETE:
6466 hci_phy_link_complete_evt(hdev, skb);
6467 break;
6468
6469 case HCI_EV_LOGICAL_LINK_COMPLETE:
6470 hci_loglink_complete_evt(hdev, skb);
6471 break;
6472
6473 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6474 hci_disconn_loglink_complete_evt(hdev, skb);
6475 break;
6476
6477 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6478 hci_disconn_phylink_complete_evt(hdev, skb);
6479 break;
6480 #endif
6481
6482 case HCI_EV_NUM_COMP_BLOCKS:
6483 hci_num_comp_blocks_evt(hdev, skb);
6484 break;
6485
6486 case HCI_EV_VENDOR:
6487 msft_vendor_evt(hdev, skb);
6488 break;
6489
6490 default:
6491 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6492 break;
6493 }
6494
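/* Finally run any request completion: the plain callback gets just the
 * status, while the skb variant also gets the pristine copy, but only
 * if it really is the Command Complete we were waiting for.
 */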
6495 if (req_complete) {
6496 req_complete(hdev, status, opcode);
6497 } else if (req_complete_skb) {
6498 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6499 kfree_skb(orig_skb);
6500 orig_skb = NULL;
6501 }
6502 req_complete_skb(hdev, status, opcode, orig_skb);
6503 }
6504
6505 done:
6506 kfree_skb(orig_skb);
6507 kfree_skb(skb);
6508 hdev->stat.evt_rx++;
6509 }