]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/hci_event.c
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetoot...
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears the in-progress inquiry flag, wakes anyone
 * sleeping on it, marks discovery as stopped and kicks any pending
 * connection requests that were held back by the inquiry.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Clear the bit before waking waiters; the barrier pairs with the
	 * one on the wait side as recommended by the wake_up_bit API.
	 */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Inquiry may have deferred outgoing connection attempts */
	hci_conn_check_pending(hdev);
}
63
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
65 {
66 __u8 status = *((__u8 *) skb->data);
67
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
69
70 if (status)
71 return;
72
73 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
74 }
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
81
82 if (status)
83 return;
84
85 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
86
87 hci_conn_check_pending(hdev);
88 }
89
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Nothing to update here; the outcome is delivered through the
 * Remote Name Request Complete event instead.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
95
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
97 {
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
100
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
102
103 if (rp->status)
104 return;
105
106 hci_dev_lock(hdev);
107
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
109 if (conn)
110 conn->role = rp->role;
111
112 hci_dev_unlock(hdev);
113 }
114
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 {
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
119
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121
122 if (rp->status)
123 return;
124
125 hci_dev_lock(hdev);
126
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 if (conn)
129 conn->link_policy = __le16_to_cpu(rp->policy);
130
131 hci_dev_unlock(hdev);
132 }
133
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response only carries status+handle, so the policy value that was
 * actually written is recovered from the sent command payload.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2: presumably skips the 2-byte handle field of the
		 * command to reach the policy — confirm against the
		 * hci_cp_write_link_policy layout.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
171 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
172 struct sk_buff *skb)
173 {
174 __u8 status = *((__u8 *) skb->data);
175 void *sent;
176
177 BT_DBG("%s status 0x%2.2x", hdev->name, status);
178
179 if (status)
180 return;
181
182 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
183 if (!sent)
184 return;
185
186 hdev->link_policy = get_unaligned_le16(sent);
187 }
188
/* Command Complete handler for HCI_Reset.
 *
 * HCI_RESET is always cleared (even on failure) so further resets are
 * possible. On success, all non-persistent device state is returned to
 * its post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	/* TX power values are unknown again until re-read */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Controller forgot advertising and scan response data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's LE white list is emptied by a reset */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
220
/* Command Complete handler for HCI_Read_Stored_Link_Key.
 *
 * Key counters are only recorded when the sent command asked for all
 * keys (read_all == 0x01), since only then do max/num cover the whole
 * store.
 */
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}
238
239 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
240 struct sk_buff *skb)
241 {
242 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
245
246 if (rp->status)
247 return;
248
249 if (rp->num_keys <= hdev->stored_num_keys)
250 hdev->stored_num_keys -= rp->num_keys;
251 else
252 hdev->stored_num_keys = 0;
253 }
254
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is active, it owns the local name and
 * is notified of the result; otherwise the name cache is updated
 * directly from the sent command on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
275
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 {
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
279
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
281
282 if (rp->status)
283 return;
284
285 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
286 test_bit(HCI_CONFIG, &hdev->dev_flags))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
288 }
289
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * Mirrors the written parameter into the HCI_AUTH flag on success, and
 * always informs the management interface (which needs to see failures
 * too) when it is active.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
317
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
319 {
320 __u8 status = *((__u8 *) skb->data);
321 __u8 param;
322 void *sent;
323
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
325
326 if (status)
327 return;
328
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
330 if (!sent)
331 return;
332
333 param = *((__u8 *) sent);
334
335 if (param)
336 set_bit(HCI_ENCRYPT, &hdev->flags);
337 else
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
339 }
340
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * On success, mirrors the inquiry-scan and page-scan bits of the sent
 * parameter into HCI_ISCAN/HCI_PSCAN. On failure, the discoverable
 * timeout is cleared since the requested scan mode did not take effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
375
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
379
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381
382 if (rp->status)
383 return;
384
385 memcpy(hdev->dev_class, rp->dev_class, 3);
386
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
389 }
390
391 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
392 {
393 __u8 status = *((__u8 *) skb->data);
394 void *sent;
395
396 BT_DBG("%s status 0x%2.2x", hdev->name, status);
397
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
399 if (!sent)
400 return;
401
402 hci_dev_lock(hdev);
403
404 if (status == 0)
405 memcpy(hdev->dev_class, sent, 3);
406
407 if (test_bit(HCI_MGMT, &hdev->dev_flags))
408 mgmt_set_class_of_dev_complete(hdev, sent, status);
409
410 hci_dev_unlock(hdev);
411 }
412
413 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
414 {
415 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
416 __u16 setting;
417
418 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
419
420 if (rp->status)
421 return;
422
423 setting = __le16_to_cpu(rp->voice_setting);
424
425 if (hdev->voice_setting == setting)
426 return;
427
428 hdev->voice_setting = setting;
429
430 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
431
432 if (hdev->notify)
433 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
434 }
435
436 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
437 struct sk_buff *skb)
438 {
439 __u8 status = *((__u8 *) skb->data);
440 __u16 setting;
441 void *sent;
442
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
444
445 if (status)
446 return;
447
448 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
449 if (!sent)
450 return;
451
452 setting = get_unaligned_le16(sent);
453
454 if (hdev->voice_setting == setting)
455 return;
456
457 hdev->voice_setting = setting;
458
459 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
460
461 if (hdev->notify)
462 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
463 }
464
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 struct sk_buff *skb)
467 {
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
469
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471
472 if (rp->status)
473 return;
474
475 hdev->num_iac = rp->num_iac;
476
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
478 }
479
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host SSP feature bit is mirrored into the features
 * table. The management interface, when active, is told the result;
 * otherwise the HCI_SSP_ENABLED flag is toggled directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
511
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * On success the host SC feature bit is mirrored into the features
 * table. The HCI_SC_ENABLED flag is only toggled here when mgmt is not
 * active (mgmt manages that flag itself).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
541
542 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
543 {
544 struct hci_rp_read_local_version *rp = (void *) skb->data;
545
546 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
547
548 if (rp->status)
549 return;
550
551 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
552 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
553 hdev->hci_ver = rp->hci_ver;
554 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
555 hdev->lmp_ver = rp->lmp_ver;
556 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
557 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
558 }
559 }
560
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
562 struct sk_buff *skb)
563 {
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
565
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
567
568 if (rp->status)
569 return;
570
571 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
572 test_bit(HCI_CONFIG, &hdev->dev_flags))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
574 }
575
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the feature page 0 bitmap and derives the default ACL packet
 * types and (e)SCO packet types from the advertised LMP features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
625
626 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
627 struct sk_buff *skb)
628 {
629 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
630
631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
632
633 if (rp->status)
634 return;
635
636 if (hdev->max_page < rp->max_page)
637 hdev->max_page = rp->max_page;
638
639 if (rp->page < HCI_MAX_PAGES)
640 memcpy(hdev->features[rp->page], rp->features, 8);
641 }
642
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 struct sk_buff *skb)
645 {
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 hdev->flow_ctl_mode = rp->mode;
654 }
655
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Records ACL/SCO MTUs and packet counts and initializes the available
 * packet counters. Broken controllers get fixed SCO values via the
 * HCI_QUIRK_FIXUP_BUFFER_SIZE quirk.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Override bogus SCO values reported by quirky controllers */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
681
682 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
683 {
684 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
685
686 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687
688 if (rp->status)
689 return;
690
691 if (test_bit(HCI_INIT, &hdev->flags))
692 bacpy(&hdev->bdaddr, &rp->bdaddr);
693
694 if (test_bit(HCI_SETUP, &hdev->dev_flags))
695 bacpy(&hdev->setup_addr, &rp->bdaddr);
696 }
697
698 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
699 struct sk_buff *skb)
700 {
701 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
702
703 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704
705 if (rp->status)
706 return;
707
708 if (test_bit(HCI_INIT, &hdev->flags)) {
709 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
710 hdev->page_scan_window = __le16_to_cpu(rp->window);
711 }
712 }
713
714 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
715 struct sk_buff *skb)
716 {
717 u8 status = *((u8 *) skb->data);
718 struct hci_cp_write_page_scan_activity *sent;
719
720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
721
722 if (status)
723 return;
724
725 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
726 if (!sent)
727 return;
728
729 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
730 hdev->page_scan_window = __le16_to_cpu(sent->window);
731 }
732
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
745 }
746
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 struct sk_buff *skb)
749 {
750 u8 status = *((u8 *) skb->data);
751 u8 *type;
752
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
754
755 if (status)
756 return;
757
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 if (type)
760 hdev->page_scan_type = *type;
761 }
762
763 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
764 struct sk_buff *skb)
765 {
766 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
767
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
769
770 if (rp->status)
771 return;
772
773 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
774 hdev->block_len = __le16_to_cpu(rp->block_len);
775 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
776
777 hdev->block_cnt = hdev->num_blocks;
778
779 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
780 hdev->block_cnt, hdev->block_len);
781 }
782
/* Command Complete handler for HCI_Read_Clock.
 *
 * Depending on the "which" field of the sent command, the clock is
 * either the local one (stored on hdev) or a piconet clock (stored on
 * the matching connection along with its accuracy).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated responses before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
817
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * Caches the AMP controller capabilities on success; in all cases a
 * pending A2MP Get Info response is sent afterwards.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
842
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP assoc can be larger than a single HCI response, so fragments
 * are accumulated in hdev->loc_assoc. rem_len counts the bytes still
 * outstanding *including* this fragment: when it exceeds the fragment
 * actually carried here, more reads are issued; otherwise this was the
 * final fragment and the A2MP responses are sent.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: only rem_len bytes of it are valid */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
879
880 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
881 struct sk_buff *skb)
882 {
883 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
884
885 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
886
887 if (rp->status)
888 return;
889
890 hdev->inq_tx_power = rp->tx_power;
891 }
892
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is always informed of the result when active. On success, the
 * PIN length from the sent command is stored on the matching ACL
 * connection (used later for link key type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
920
921 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
922 {
923 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
924
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926
927 hci_dev_lock(hdev);
928
929 if (test_bit(HCI_MGMT, &hdev->dev_flags))
930 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
931 rp->status);
932
933 hci_dev_unlock(hdev);
934 }
935
936 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
937 struct sk_buff *skb)
938 {
939 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
940
941 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
942
943 if (rp->status)
944 return;
945
946 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
947 hdev->le_pkts = rp->le_max_pkt;
948
949 hdev->le_cnt = hdev->le_pkts;
950
951 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
952 }
953
954 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
955 struct sk_buff *skb)
956 {
957 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
958
959 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
960
961 if (rp->status)
962 return;
963
964 memcpy(hdev->le_features, rp->features, 8);
965 }
966
967 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
968 struct sk_buff *skb)
969 {
970 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
971
972 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
973
974 if (rp->status)
975 return;
976
977 hdev->adv_tx_power = rp->tx_power;
978 }
979
980 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
981 {
982 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
983
984 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985
986 hci_dev_lock(hdev);
987
988 if (test_bit(HCI_MGMT, &hdev->dev_flags))
989 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
990 rp->status);
991
992 hci_dev_unlock(hdev);
993 }
994
995 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
996 struct sk_buff *skb)
997 {
998 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
999
1000 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001
1002 hci_dev_lock(hdev);
1003
1004 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1005 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1006 ACL_LINK, 0, rp->status);
1007
1008 hci_dev_unlock(hdev);
1009 }
1010
1011 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1012 {
1013 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1014
1015 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016
1017 hci_dev_lock(hdev);
1018
1019 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1020 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1021 0, rp->status);
1022
1023 hci_dev_unlock(hdev);
1024 }
1025
1026 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1027 struct sk_buff *skb)
1028 {
1029 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1030
1031 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1032
1033 hci_dev_lock(hdev);
1034
1035 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1036 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1037 ACL_LINK, 0, rp->status);
1038
1039 hci_dev_unlock(hdev);
1040 }
1041
1042 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1044 {
1045 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1046
1047 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1048
1049 hci_dev_lock(hdev);
1050 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1051 rp->status);
1052 hci_dev_unlock(hdev);
1053 }
1054
1055 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1056 struct sk_buff *skb)
1057 {
1058 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1059
1060 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061
1062 hci_dev_lock(hdev);
1063 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1064 rp->hash256, rp->rand256,
1065 rp->status);
1066 hci_dev_unlock(hdev);
1067 }
1068
1069
1070 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1071 {
1072 __u8 status = *((__u8 *) skb->data);
1073 bdaddr_t *sent;
1074
1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076
1077 if (status)
1078 return;
1079
1080 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1081 if (!sent)
1082 return;
1083
1084 hci_dev_lock(hdev);
1085
1086 bacpy(&hdev->random_addr, sent);
1087
1088 hci_dev_unlock(hdev);
1089 }
1090
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * Tracks the HCI_LE_ADV flag according to the value that was sent.
 * When advertising was enabled while a peripheral-role connection is
 * being set up, a connection timeout is armed so the attempt cannot
 * hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1125
1126 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1127 {
1128 struct hci_cp_le_set_scan_param *cp;
1129 __u8 status = *((__u8 *) skb->data);
1130
1131 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1132
1133 if (status)
1134 return;
1135
1136 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1137 if (!cp)
1138 return;
1139
1140 hci_dev_lock(hdev);
1141
1142 hdev->le_scan_type = cp->type;
1143
1144 hci_dev_unlock(hdev);
1145 }
1146
1147 static bool has_pending_adv_report(struct hci_dev *hdev)
1148 {
1149 struct discovery_state *d = &hdev->discovery;
1150
1151 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1152 }
1153
1154 static void clear_pending_adv_report(struct hci_dev *hdev)
1155 {
1156 struct discovery_state *d = &hdev->discovery;
1157
1158 bacpy(&d->last_adv_addr, BDADDR_ANY);
1159 d->last_adv_data_len = 0;
1160 }
1161
1162 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1163 u8 bdaddr_type, s8 rssi, u32 flags,
1164 u8 *data, u8 len)
1165 {
1166 struct discovery_state *d = &hdev->discovery;
1167
1168 bacpy(&d->last_adv_addr, bdaddr);
1169 d->last_adv_addr_type = bdaddr_type;
1170 d->last_adv_rssi = rssi;
1171 d->last_adv_flags = flags;
1172 memcpy(d->last_adv_data, data, len);
1173 d->last_adv_data_len = len;
1174 }
1175
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Keeps the HCI_LE_SCAN flag in sync with the sent enable value. On
 * disable, any buffered advertising report is flushed to userspace,
 * the scan-disable timer is cancelled, and either discovery is marked
 * stopped (scan interrupted by a connect request) or advertising is
 * re-enabled if active scanning had suppressed it.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans buffer reports awaiting scan responses;
		 * start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1245
1246 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1248 {
1249 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1250
1251 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1252
1253 if (rp->status)
1254 return;
1255
1256 hdev->le_white_list_size = rp->size;
1257 }
1258
1259 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1260 struct sk_buff *skb)
1261 {
1262 __u8 status = *((__u8 *) skb->data);
1263
1264 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1265
1266 if (status)
1267 return;
1268
1269 hci_bdaddr_list_clear(&hdev->le_white_list);
1270 }
1271
1272 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1273 struct sk_buff *skb)
1274 {
1275 struct hci_cp_le_add_to_white_list *sent;
1276 __u8 status = *((__u8 *) skb->data);
1277
1278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279
1280 if (status)
1281 return;
1282
1283 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1284 if (!sent)
1285 return;
1286
1287 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1288 sent->bdaddr_type);
1289 }
1290
1291 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1292 struct sk_buff *skb)
1293 {
1294 struct hci_cp_le_del_from_white_list *sent;
1295 __u8 status = *((__u8 *) skb->data);
1296
1297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1298
1299 if (status)
1300 return;
1301
1302 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1303 if (!sent)
1304 return;
1305
1306 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1307 sent->bdaddr_type);
1308 }
1309
1310 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1312 {
1313 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1314
1315 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1316
1317 if (rp->status)
1318 return;
1319
1320 memcpy(hdev->le_states, rp->le_states, 8);
1321 }
1322
1323 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1324 struct sk_buff *skb)
1325 {
1326 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1327
1328 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1329
1330 if (rp->status)
1331 return;
1332
1333 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1334 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1335 }
1336
1337 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1339 {
1340 struct hci_cp_le_write_def_data_len *sent;
1341 __u8 status = *((__u8 *) skb->data);
1342
1343 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1344
1345 if (status)
1346 return;
1347
1348 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1349 if (!sent)
1350 return;
1351
1352 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1353 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1354 }
1355
1356 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1357 struct sk_buff *skb)
1358 {
1359 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1360
1361 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1362
1363 if (rp->status)
1364 return;
1365
1366 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1367 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1368 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1369 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1370 }
1371
1372 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1373 struct sk_buff *skb)
1374 {
1375 struct hci_cp_write_le_host_supported *sent;
1376 __u8 status = *((__u8 *) skb->data);
1377
1378 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379
1380 if (status)
1381 return;
1382
1383 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1384 if (!sent)
1385 return;
1386
1387 hci_dev_lock(hdev);
1388
1389 if (sent->le) {
1390 hdev->features[1][0] |= LMP_HOST_LE;
1391 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1392 } else {
1393 hdev->features[1][0] &= ~LMP_HOST_LE;
1394 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1395 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1396 }
1397
1398 if (sent->simul)
1399 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1400 else
1401 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1402
1403 hci_dev_unlock(hdev);
1404 }
1405
1406 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1407 {
1408 struct hci_cp_le_set_adv_param *cp;
1409 u8 status = *((u8 *) skb->data);
1410
1411 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1412
1413 if (status)
1414 return;
1415
1416 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1417 if (!cp)
1418 return;
1419
1420 hci_dev_lock(hdev);
1421 hdev->adv_addr_type = cp->own_address_type;
1422 hci_dev_unlock(hdev);
1423 }
1424
1425 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1426 struct sk_buff *skb)
1427 {
1428 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1429
1430 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1431 hdev->name, rp->status, rp->phy_handle);
1432
1433 if (rp->status)
1434 return;
1435
1436 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1437 }
1438
1439 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1440 {
1441 struct hci_rp_read_rssi *rp = (void *) skb->data;
1442 struct hci_conn *conn;
1443
1444 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1445
1446 if (rp->status)
1447 return;
1448
1449 hci_dev_lock(hdev);
1450
1451 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1452 if (conn)
1453 conn->rssi = rp->rssi;
1454
1455 hci_dev_unlock(hdev);
1456 }
1457
1458 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1459 {
1460 struct hci_cp_read_tx_power *sent;
1461 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1462 struct hci_conn *conn;
1463
1464 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1465
1466 if (rp->status)
1467 return;
1468
1469 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1470 if (!sent)
1471 return;
1472
1473 hci_dev_lock(hdev);
1474
1475 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1476 if (!conn)
1477 goto unlock;
1478
1479 switch (sent->type) {
1480 case 0x00:
1481 conn->tx_power = rp->tx_power;
1482 break;
1483 case 0x01:
1484 conn->max_tx_power = rp->tx_power;
1485 break;
1486 }
1487
1488 unlock:
1489 hci_dev_unlock(hdev);
1490 }
1491
1492 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1493 {
1494 u8 status = *((u8 *) skb->data);
1495 u8 *mode;
1496
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1498
1499 if (status)
1500 return;
1501
1502 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1503 if (mode)
1504 hdev->ssp_debug_mode = *mode;
1505 }
1506
1507 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1508 {
1509 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1510
1511 if (status) {
1512 hci_conn_check_pending(hdev);
1513 return;
1514 }
1515
1516 set_bit(HCI_INQUIRY, &hdev->flags);
1517 }
1518
/* Handle the Command Status event for HCI_OP_CREATE_CONN. */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c is "Command Disallowed"; in that
			 * case keep the conn in BT_CONNECT2 for another
			 * attempt instead of tearing it down.
			 * NOTE(review): the retry limit of 2 attempts is
			 * inferred from the counter — confirm against the
			 * code that increments conn->attempt.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a conn object exists to
		 * track the outgoing ACL connection.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1556
1557 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1558 {
1559 struct hci_cp_add_sco *cp;
1560 struct hci_conn *acl, *sco;
1561 __u16 handle;
1562
1563 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1564
1565 if (!status)
1566 return;
1567
1568 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1569 if (!cp)
1570 return;
1571
1572 handle = __le16_to_cpu(cp->handle);
1573
1574 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1575
1576 hci_dev_lock(hdev);
1577
1578 acl = hci_conn_hash_lookup_handle(hdev, handle);
1579 if (acl) {
1580 sco = acl->link;
1581 if (sco) {
1582 sco->state = BT_CLOSED;
1583
1584 hci_connect_cfm(sco, status);
1585 hci_conn_del(sco);
1586 }
1587 }
1588
1589 hci_dev_unlock(hdev);
1590 }
1591
1592 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1593 {
1594 struct hci_cp_auth_requested *cp;
1595 struct hci_conn *conn;
1596
1597 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1598
1599 if (!status)
1600 return;
1601
1602 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1603 if (!cp)
1604 return;
1605
1606 hci_dev_lock(hdev);
1607
1608 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1609 if (conn) {
1610 if (conn->state == BT_CONFIG) {
1611 hci_connect_cfm(conn, status);
1612 hci_conn_drop(conn);
1613 }
1614 }
1615
1616 hci_dev_unlock(hdev);
1617 }
1618
1619 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1620 {
1621 struct hci_cp_set_conn_encrypt *cp;
1622 struct hci_conn *conn;
1623
1624 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1625
1626 if (!status)
1627 return;
1628
1629 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1630 if (!cp)
1631 return;
1632
1633 hci_dev_lock(hdev);
1634
1635 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1636 if (conn) {
1637 if (conn->state == BT_CONFIG) {
1638 hci_connect_cfm(conn, status);
1639 hci_conn_drop(conn);
1640 }
1641 }
1642
1643 hci_dev_unlock(hdev);
1644 }
1645
1646 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1647 struct hci_conn *conn)
1648 {
1649 if (conn->state != BT_CONFIG || !conn->out)
1650 return 0;
1651
1652 if (conn->pending_sec_level == BT_SECURITY_SDP)
1653 return 0;
1654
1655 /* Only request authentication for SSP connections or non-SSP
1656 * devices with sec_level MEDIUM or HIGH or if MITM protection
1657 * is requested.
1658 */
1659 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1660 conn->pending_sec_level != BT_SECURITY_FIPS &&
1661 conn->pending_sec_level != BT_SECURITY_HIGH &&
1662 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1663 return 0;
1664
1665 return 1;
1666 }
1667
1668 static int hci_resolve_name(struct hci_dev *hdev,
1669 struct inquiry_entry *e)
1670 {
1671 struct hci_cp_remote_name_req cp;
1672
1673 memset(&cp, 0, sizeof(cp));
1674
1675 bacpy(&cp.bdaddr, &e->data.bdaddr);
1676 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1677 cp.pscan_mode = e->data.pscan_mode;
1678 cp.clock_offset = e->data.clock_offset;
1679
1680 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1681 }
1682
1683 static bool hci_resolve_next_name(struct hci_dev *hdev)
1684 {
1685 struct discovery_state *discov = &hdev->discovery;
1686 struct inquiry_entry *e;
1687
1688 if (list_empty(&discov->resolve))
1689 return false;
1690
1691 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1692 if (!e)
1693 return false;
1694
1695 if (hci_resolve_name(hdev, e) == 0) {
1696 e->name_state = NAME_PENDING;
1697 return true;
1698 }
1699
1700 return false;
1701 }
1702
/* Process a resolved (or failed) remote name for ongoing discovery.
 *
 * Called from the Remote Name Request Complete event and from the
 * command-status failure path. conn may be NULL; name is NULL when
 * the name could not be obtained.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		/* Name resolved: report it to userspace via mgmt */
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Kick off the next pending name request, if any remains */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1751
/* Handle the Command Status event for HCI_OP_REMOTE_NAME_REQ. */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery state machine know this name lookup failed */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* The name request failed, so initiate the authentication that
	 * would normally have followed it.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1794
1795 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1796 {
1797 struct hci_cp_read_remote_features *cp;
1798 struct hci_conn *conn;
1799
1800 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1801
1802 if (!status)
1803 return;
1804
1805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1806 if (!cp)
1807 return;
1808
1809 hci_dev_lock(hdev);
1810
1811 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1812 if (conn) {
1813 if (conn->state == BT_CONFIG) {
1814 hci_connect_cfm(conn, status);
1815 hci_conn_drop(conn);
1816 }
1817 }
1818
1819 hci_dev_unlock(hdev);
1820 }
1821
1822 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1823 {
1824 struct hci_cp_read_remote_ext_features *cp;
1825 struct hci_conn *conn;
1826
1827 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1828
1829 if (!status)
1830 return;
1831
1832 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1833 if (!cp)
1834 return;
1835
1836 hci_dev_lock(hdev);
1837
1838 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1839 if (conn) {
1840 if (conn->state == BT_CONFIG) {
1841 hci_connect_cfm(conn, status);
1842 hci_conn_drop(conn);
1843 }
1844 }
1845
1846 hci_dev_unlock(hdev);
1847 }
1848
1849 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1850 {
1851 struct hci_cp_setup_sync_conn *cp;
1852 struct hci_conn *acl, *sco;
1853 __u16 handle;
1854
1855 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1856
1857 if (!status)
1858 return;
1859
1860 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1861 if (!cp)
1862 return;
1863
1864 handle = __le16_to_cpu(cp->handle);
1865
1866 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1867
1868 hci_dev_lock(hdev);
1869
1870 acl = hci_conn_hash_lookup_handle(hdev, handle);
1871 if (acl) {
1872 sco = acl->link;
1873 if (sco) {
1874 sco->state = BT_CLOSED;
1875
1876 hci_connect_cfm(sco, status);
1877 hci_conn_del(sco);
1878 }
1879 }
1880
1881 hci_dev_unlock(hdev);
1882 }
1883
1884 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1885 {
1886 struct hci_cp_sniff_mode *cp;
1887 struct hci_conn *conn;
1888
1889 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1890
1891 if (!status)
1892 return;
1893
1894 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1895 if (!cp)
1896 return;
1897
1898 hci_dev_lock(hdev);
1899
1900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1901 if (conn) {
1902 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1903
1904 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1905 hci_sco_setup(conn, status);
1906 }
1907
1908 hci_dev_unlock(hdev);
1909 }
1910
1911 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1912 {
1913 struct hci_cp_exit_sniff_mode *cp;
1914 struct hci_conn *conn;
1915
1916 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1917
1918 if (!status)
1919 return;
1920
1921 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1922 if (!cp)
1923 return;
1924
1925 hci_dev_lock(hdev);
1926
1927 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1928 if (conn) {
1929 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1930
1931 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1932 hci_sco_setup(conn, status);
1933 }
1934
1935 hci_dev_unlock(hdev);
1936 }
1937
1938 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1939 {
1940 struct hci_cp_disconnect *cp;
1941 struct hci_conn *conn;
1942
1943 if (!status)
1944 return;
1945
1946 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1947 if (!cp)
1948 return;
1949
1950 hci_dev_lock(hdev);
1951
1952 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1953 if (conn)
1954 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1955 conn->dst_type, status);
1956
1957 hci_dev_unlock(hdev);
1958 }
1959
1960 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1961 {
1962 struct hci_cp_create_phy_link *cp;
1963
1964 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1965
1966 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1967 if (!cp)
1968 return;
1969
1970 hci_dev_lock(hdev);
1971
1972 if (status) {
1973 struct hci_conn *hcon;
1974
1975 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1976 if (hcon)
1977 hci_conn_del(hcon);
1978 } else {
1979 amp_write_remote_assoc(hdev, cp->phy_handle);
1980 }
1981
1982 hci_dev_unlock(hdev);
1983 }
1984
1985 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1986 {
1987 struct hci_cp_accept_phy_link *cp;
1988
1989 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1990
1991 if (status)
1992 return;
1993
1994 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1995 if (!cp)
1996 return;
1997
1998 amp_write_remote_assoc(hdev, cp->phy_handle);
1999 }
2000
/* Handle the Command Status event for HCI_OP_LE_CREATE_CONN.
 * On success, records the initiator/responder addresses needed by SMP
 * and arms the LE connection-attempt timeout where applicable.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
2051
/* Handle the Command Status event for HCI_OP_LE_START_ENC.
 * Only a failure needs handling: encryption could not be started, so
 * the link is terminated with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act on fully established connections */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2081
2082 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2083 {
2084 struct hci_cp_switch_role *cp;
2085 struct hci_conn *conn;
2086
2087 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2088
2089 if (!status)
2090 return;
2091
2092 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2093 if (!cp)
2094 return;
2095
2096 hci_dev_lock(hdev);
2097
2098 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2099 if (conn)
2100 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2101
2102 hci_dev_unlock(hdev);
2103 }
2104
/* Handle the Inquiry Complete event: clear the inquiry flag and, when
 * mgmt-driven discovery is active, either move on to name resolution
 * or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state below is only managed through mgmt */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* No names left to resolve; discovery is done */
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first entry that still needs a name */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2145
2146 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2147 {
2148 struct inquiry_data data;
2149 struct inquiry_info *info = (void *) (skb->data + 1);
2150 int num_rsp = *((__u8 *) skb->data);
2151
2152 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2153
2154 if (!num_rsp)
2155 return;
2156
2157 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2158 return;
2159
2160 hci_dev_lock(hdev);
2161
2162 for (; num_rsp; num_rsp--, info++) {
2163 u32 flags;
2164
2165 bacpy(&data.bdaddr, &info->bdaddr);
2166 data.pscan_rep_mode = info->pscan_rep_mode;
2167 data.pscan_period_mode = info->pscan_period_mode;
2168 data.pscan_mode = info->pscan_mode;
2169 memcpy(data.dev_class, info->dev_class, 3);
2170 data.clock_offset = info->clock_offset;
2171 data.rssi = HCI_RSSI_INVALID;
2172 data.ssp_mode = 0x00;
2173
2174 flags = hci_inquiry_cache_update(hdev, &data, false);
2175
2176 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2177 info->dev_class, HCI_RSSI_INVALID,
2178 flags, NULL, 0, NULL, 0);
2179 }
2180
2181 hci_dev_unlock(hdev);
2182 }
2183
/* Handle the Connection Complete event for BR/EDR ACL, SCO and eSCO
 * links: finalize the connection object on success or clean it up on
 * failure, and kick off remote feature discovery for ACL links.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* The request may have gone out as eSCO; retry the
		 * lookup with that type and downgrade the conn to SCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give unpaired legacy (non-SSP) peers a longer
			 * window to complete pairing before disconnect.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure notify and delete; on success ACL links are
	 * confirmed later (after feature discovery), others now.
	 */
	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2268
2269 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2270 {
2271 struct hci_cp_reject_conn_req cp;
2272
2273 bacpy(&cp.bdaddr, bdaddr);
2274 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2275 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2276 }
2277
/* Handle the Connection Request event: apply accept/reject policy
 * (protocol mask, blacklist, connectable state, whitelist) and either
 * accept the connection, defer it to the protocol layer, or reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Reject connections from blacklisted devices */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default sync connection parameters: 8000 bytes/sec
		 * bandwidth, no latency or retransmission constraints.
		 */
		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer wants to decide; defer the accept */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2372
2373 static u8 hci_to_mgmt_reason(u8 err)
2374 {
2375 switch (err) {
2376 case HCI_ERROR_CONNECTION_TIMEOUT:
2377 return MGMT_DEV_DISCONN_TIMEOUT;
2378 case HCI_ERROR_REMOTE_USER_TERM:
2379 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2380 case HCI_ERROR_REMOTE_POWER_OFF:
2381 return MGMT_DEV_DISCONN_REMOTE;
2382 case HCI_ERROR_LOCAL_HOST_TERM:
2383 return MGMT_DEV_DISCONN_LOCAL_HOST;
2384 default:
2385 return MGMT_DEV_DISCONN_UNKNOWN;
2386 }
2387 }
2388
/* Handle the Disconnection Complete event: notify mgmt, update link
 * keys and page scanning, re-arm auto-connect parameters, and finally
 * delete the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only tell mgmt about the disconnect if it saw the connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	/* Re-arm auto-connection for peers configured to reconnect */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed just below; remember the link type first */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2466
/* Handle an HCI Authentication Complete event.
 *
 * Updates the connection's authentication state, notifies waiters,
 * and kicks off encryption where it was requested or implied by the
 * connection state.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* A legacy (non-SSP) device cannot be re-authenticated,
		 * so leave the auth state untouched in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links encryption is started before the
		 * connection is reported as established.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Reset the disconnect timer via a hold/drop pair */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			/* Authentication done; now request encryption */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2531
/* Handle an HCI Remote Name Request Complete event.
 *
 * Resolves any pending name-request bookkeeping and, for outgoing
 * connections that still need it, starts authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution reporting is only relevant when the
	 * management interface is in use.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2573
/* Handle an HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags, enforces
 * Secure Connections Only policy while the link is still in BT_CONFIG,
 * and notifies waiters of the result.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* An encryption failure on an established link is fatal:
	 * disconnect with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2645
2646 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2647 struct sk_buff *skb)
2648 {
2649 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2650 struct hci_conn *conn;
2651
2652 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2653
2654 hci_dev_lock(hdev);
2655
2656 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2657 if (conn) {
2658 if (!ev->status)
2659 set_bit(HCI_CONN_SECURE, &conn->flags);
2660
2661 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2662
2663 hci_key_change_cfm(conn, ev->status);
2664 }
2665
2666 hci_dev_unlock(hdev);
2667 }
2668
/* Handle an HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote's base feature page and continues connection
 * setup: read extended features if both sides support them, otherwise
 * resolve the remote name / report the device as connected.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Setup continuation only applies while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		/* Fetch extended feature page 1 before finishing setup */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before reporting the device */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2717
/* Handle an HCI Command Complete event.
 *
 * Strips the event header so that each per-opcode handler sees only
 * the command's return parameters, dispatches on the opcode, and then
 * re-starts command queue processing if the controller signalled
 * capacity for more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions are controller-generated and do not
	 * correspond to a command we sent, so keep the timer running.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller has room for more commands: reset the credit
	 * count and kick the command work queue.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
3021
/* Handle an HCI Command Status event.
 *
 * Dispatches the status to the per-opcode handler, completes the
 * originating request on error (or when no completion event is
 * expected), and re-starts command queue processing if the controller
 * signalled capacity for more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP never corresponds to a command we sent */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent
	 * command is not waiting for a dedicated completion event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req_event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller can accept more commands: kick the queue */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
3118
3119 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3120 {
3121 struct hci_ev_hardware_error *ev = (void *) skb->data;
3122
3123 hdev->hw_error_code = ev->code;
3124
3125 queue_work(hdev->req_workqueue, &hdev->error_reset);
3126 }
3127
3128 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3129 {
3130 struct hci_ev_role_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3132
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3134
3135 hci_dev_lock(hdev);
3136
3137 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3138 if (conn) {
3139 if (!ev->status)
3140 conn->role = ev->role;
3141
3142 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3143
3144 hci_role_switch_cfm(conn, ev->status, ev->role);
3145 }
3146
3147 hci_dev_unlock(hdev);
3148 }
3149
/* Handle a Number Of Completed Packets event (packet-based flow
 * control).
 *
 * Returns transmit credits to the per-link-type counters for every
 * handle listed in the event and kicks the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb actually carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits, clamped to the controller's limits */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without dedicated LE buffers, LE traffic shares
			 * the ACL credit pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3215
3216 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3217 __u16 handle)
3218 {
3219 struct hci_chan *chan;
3220
3221 switch (hdev->dev_type) {
3222 case HCI_BREDR:
3223 return hci_conn_hash_lookup_handle(hdev, handle);
3224 case HCI_AMP:
3225 chan = hci_chan_lookup_handle(hdev, handle);
3226 if (chan)
3227 return chan->conn;
3228 break;
3229 default:
3230 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3231 break;
3232 }
3233
3234 return NULL;
3235 }
3236
/* Handle a Number Of Completed Data Blocks event (block-based flow
 * control).
 *
 * Returns block credits for every handle listed in the event and
 * kicks the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb actually carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return blocks, clamped to the controller's pool */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3286
3287 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3288 {
3289 struct hci_ev_mode_change *ev = (void *) skb->data;
3290 struct hci_conn *conn;
3291
3292 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3293
3294 hci_dev_lock(hdev);
3295
3296 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3297 if (conn) {
3298 conn->mode = ev->mode;
3299
3300 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3301 &conn->flags)) {
3302 if (conn->mode == HCI_CM_ACTIVE)
3303 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3304 else
3305 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3306 }
3307
3308 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3309 hci_sco_setup(conn, ev->status);
3310 }
3311
3312 hci_dev_unlock(hdev);
3313 }
3314
/* Handle an HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not bondable and
 * pairing was not initiated locally; otherwise forwards the request
 * to user space via the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing runs */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Refuse remote-initiated pairing when not bondable */
	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3352
3353 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3354 {
3355 if (key_type == HCI_LK_CHANGED_COMBINATION)
3356 return;
3357
3358 conn->pin_length = pin_len;
3359 conn->key_type = key_type;
3360
3361 switch (key_type) {
3362 case HCI_LK_LOCAL_UNIT:
3363 case HCI_LK_REMOTE_UNIT:
3364 case HCI_LK_DEBUG_COMBINATION:
3365 return;
3366 case HCI_LK_COMBINATION:
3367 if (pin_len == 16)
3368 conn->pending_sec_level = BT_SECURITY_HIGH;
3369 else
3370 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3371 break;
3372 case HCI_LK_UNAUTH_COMBINATION_P192:
3373 case HCI_LK_UNAUTH_COMBINATION_P256:
3374 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3375 break;
3376 case HCI_LK_AUTH_COMBINATION_P192:
3377 conn->pending_sec_level = BT_SECURITY_HIGH;
3378 break;
3379 case HCI_LK_AUTH_COMBINATION_P256:
3380 conn->pending_sec_level = BT_SECURITY_FIPS;
3381 break;
3382 }
3383 }
3384
/* Handle an HCI Link Key Request event.
 *
 * Looks up a stored link key for the remote device and replies with
 * it if it satisfies the connection's security requirements;
 * otherwise sends a negative reply so pairing is triggered.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject an unauthenticated key when the connection's
		 * auth requirements demand MITM protection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is not strong enough
		 * for high/FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3444
/* Handle an HCI Link Key Notification event.
 *
 * Stores the newly created link key, notifies user space and decides
 * whether the key should be flushed when the link goes down.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): a zero pin_len is always passed to
	 * hci_add_link_key() here, while conn_set_key() below reuses
	 * conn->pin_length — confirm this asymmetry is intentional.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the disconnect timer via a hold/drop pair */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Key storage is only handled when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed on disconnect */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3504
3505 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3506 {
3507 struct hci_ev_clock_offset *ev = (void *) skb->data;
3508 struct hci_conn *conn;
3509
3510 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3511
3512 hci_dev_lock(hdev);
3513
3514 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3515 if (conn && !ev->status) {
3516 struct inquiry_entry *ie;
3517
3518 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3519 if (ie) {
3520 ie->data.clock_offset = ev->clock_offset;
3521 ie->timestamp = jiffies;
3522 }
3523 }
3524
3525 hci_dev_unlock(hdev);
3526 }
3527
3528 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3529 {
3530 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3531 struct hci_conn *conn;
3532
3533 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3534
3535 hci_dev_lock(hdev);
3536
3537 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3538 if (conn && !ev->status)
3539 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3540
3541 hci_dev_unlock(hdev);
3542 }
3543
3544 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3545 {
3546 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3547 struct inquiry_entry *ie;
3548
3549 BT_DBG("%s", hdev->name);
3550
3551 hci_dev_lock(hdev);
3552
3553 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3554 if (ie) {
3555 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3556 ie->timestamp = jiffies;
3557 }
3558
3559 hci_dev_unlock(hdev);
3560 }
3561
/* Handle an Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with or without a pscan_mode
 * field); the per-response size is used to tell them apart. Each
 * response updates the inquiry cache and is reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size decides which of the two formats this is */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3625
/* Read Remote Extended Features Complete event: store the reported
 * remote features page and, for page 1 (the host features page),
 * update the per-connection SSP/SC flags. If the connection is still
 * in BT_CONFIG, continue its setup with a remote name request or
 * report it to the management interface.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Only store pages that fit the per-connection feature table */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Keep the cached SSP capability of the device current */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remainder only applies while connection setup is ongoing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		/* 0x02 = page scan repetition mode R2 */
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is needed, setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3689
/* Synchronous Connection Complete event: finalize an SCO/eSCO
 * connection attempt. For a set of known rejection status codes on an
 * outgoing link, the setup is retried with a restricted packet type
 * mask before the connection is finally given up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO setup request may complete as a plain SCO
		 * link; look the connection up under its original eSCO
		 * type and downgrade it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the setup with a reduced packet type
			 * selection.
			 * NOTE(review): conn->link is assumed to be the
			 * non-NULL parent ACL connection for outgoing
			 * SCO links - confirm against the SCO connect
			 * path before relying on this.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3748
3749 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3750 {
3751 size_t parsed = 0;
3752
3753 while (parsed < eir_len) {
3754 u8 field_len = eir[0];
3755
3756 if (field_len == 0)
3757 return parsed;
3758
3759 parsed += field_len + 1;
3760 eir += field_len + 1;
3761 }
3762
3763 return eir_len;
3764 }
3765
3766 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3767 struct sk_buff *skb)
3768 {
3769 struct inquiry_data data;
3770 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3771 int num_rsp = *((__u8 *) skb->data);
3772 size_t eir_len;
3773
3774 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3775
3776 if (!num_rsp)
3777 return;
3778
3779 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3780 return;
3781
3782 hci_dev_lock(hdev);
3783
3784 for (; num_rsp; num_rsp--, info++) {
3785 u32 flags;
3786 bool name_known;
3787
3788 bacpy(&data.bdaddr, &info->bdaddr);
3789 data.pscan_rep_mode = info->pscan_rep_mode;
3790 data.pscan_period_mode = info->pscan_period_mode;
3791 data.pscan_mode = 0x00;
3792 memcpy(data.dev_class, info->dev_class, 3);
3793 data.clock_offset = info->clock_offset;
3794 data.rssi = info->rssi;
3795 data.ssp_mode = 0x01;
3796
3797 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3798 name_known = eir_has_data_type(info->data,
3799 sizeof(info->data),
3800 EIR_NAME_COMPLETE);
3801 else
3802 name_known = true;
3803
3804 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3805
3806 eir_len = eir_get_length(info->data, sizeof(info->data));
3807
3808 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3809 info->dev_class, info->rssi,
3810 flags, info->data, eir_len, NULL, 0);
3811 }
3812
3813 hci_dev_unlock(hdev);
3814 }
3815
/* Encryption Key Refresh Complete event. Only LE links are handled
 * here; on success the pending security level becomes effective, on
 * failure an established link is disconnected. Depending on the
 * connection state the result is delivered as a connect or an auth
 * confirmation.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link terminates it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around until the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3865
3866 static u8 hci_get_auth_req(struct hci_conn *conn)
3867 {
3868 /* If remote requests no-bonding follow that lead */
3869 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3870 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3871 return conn->remote_auth | (conn->auth_type & 0x01);
3872
3873 /* If both remote and local have enough IO capabilities, require
3874 * MITM protection
3875 */
3876 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3877 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3878 return conn->remote_auth | 0x01;
3879
3880 /* No MITM protection possible so ignore remote requirement */
3881 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3882 }
3883
3884 static u8 bredr_oob_data_present(struct hci_conn *conn)
3885 {
3886 struct hci_dev *hdev = conn->hdev;
3887 struct oob_data *data;
3888
3889 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
3890 if (!data)
3891 return 0x00;
3892
3893 if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) {
3894 if (bredr_sc_enabled(hdev)) {
3895 /* When Secure Connections is enabled, then just
3896 * return the present value stored with the OOB
3897 * data. The stored value contains the right present
3898 * information. However it can only be trusted when
3899 * not in Secure Connection Only mode.
3900 */
3901 if (!test_bit(HCI_SC_ONLY, &hdev->dev_flags))
3902 return data->present;
3903
3904 /* When Secure Connections Only mode is enabled, then
3905 * the P-256 values are required. If they are not
3906 * available, then do not declare that OOB data is
3907 * present.
3908 */
3909 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
3910 !memcmp(data->hash256, ZERO_KEY, 16))
3911 return 0x00;
3912
3913 return 0x02;
3914 }
3915
3916 /* When Secure Connections is not enabled or actually
3917 * not supported by the hardware, then check that if
3918 * P-192 data values are present.
3919 */
3920 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
3921 !memcmp(data->hash192, ZERO_KEY, 16))
3922 return 0x00;
3923
3924 return 0x01;
3925 }
3926
3927 return 0x00;
3928 }
3929
/* IO Capability Request event: the controller asks how pairing should
 * proceed. Reply with our IO capability, authentication requirements
 * and OOB data availability, or send a negative reply when the pairing
 * is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection for the duration of the pairing;
	 * presumably balanced by the drop in
	 * hci_simple_pair_complete_evt() - verify when changing.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3998
3999 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4000 {
4001 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4002 struct hci_conn *conn;
4003
4004 BT_DBG("%s", hdev->name);
4005
4006 hci_dev_lock(hdev);
4007
4008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4009 if (!conn)
4010 goto unlock;
4011
4012 conn->remote_cap = ev->capability;
4013 conn->remote_auth = ev->authentication;
4014 if (ev->oob_data)
4015 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
4016
4017 unlock:
4018 hci_dev_unlock(hdev);
4019 }
4020
/* User Confirmation Request event: decide whether the numeric
 * comparison can be auto-accepted, must be rejected (MITM required but
 * the remote cannot provide it), or has to be forwarded to user space
 * through the management interface.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional configurable delay before auto-accepting */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4095
4096 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4097 struct sk_buff *skb)
4098 {
4099 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4100
4101 BT_DBG("%s", hdev->name);
4102
4103 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4104 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4105 }
4106
4107 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4108 struct sk_buff *skb)
4109 {
4110 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4111 struct hci_conn *conn;
4112
4113 BT_DBG("%s", hdev->name);
4114
4115 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4116 if (!conn)
4117 return;
4118
4119 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4120 conn->passkey_entered = 0;
4121
4122 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4123 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4124 conn->dst_type, conn->passkey_notify,
4125 conn->passkey_entered);
4126 }
4127
/* Keypress Notification event: track how many passkey digits the
 * remote side has entered and forward the progress to user space.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		/* Entry started: reset the counter, nothing to report yet */
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		/* Completion needs no extra notification */
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
4165
4166 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4167 struct sk_buff *skb)
4168 {
4169 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4170 struct hci_conn *conn;
4171
4172 BT_DBG("%s", hdev->name);
4173
4174 hci_dev_lock(hdev);
4175
4176 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4177 if (!conn)
4178 goto unlock;
4179
4180 /* Reset the authentication requirement to unknown */
4181 conn->remote_auth = 0xff;
4182
4183 /* To avoid duplicate auth_failed events to user space we check
4184 * the HCI_CONN_AUTH_PEND flag which will be set if we
4185 * initiated the authentication. A traditional auth_complete
4186 * event gets always produced as initiator and is also mapped to
4187 * the mgmt_auth_failed event */
4188 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4189 mgmt_auth_failed(conn, ev->status);
4190
4191 hci_conn_drop(conn);
4192
4193 unlock:
4194 hci_dev_unlock(hdev);
4195 }
4196
/* Remote Host Supported Features Notification event: store the host
 * features page (page 1) on an existing connection and refresh the
 * cached SSP capability of the device in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Page 1 of the feature table holds the host features */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
4218
4219 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4220 struct sk_buff *skb)
4221 {
4222 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4223 struct oob_data *data;
4224
4225 BT_DBG("%s", hdev->name);
4226
4227 hci_dev_lock(hdev);
4228
4229 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4230 goto unlock;
4231
4232 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4233 if (!data) {
4234 struct hci_cp_remote_oob_data_neg_reply cp;
4235
4236 bacpy(&cp.bdaddr, &ev->bdaddr);
4237 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4238 sizeof(cp), &cp);
4239 goto unlock;
4240 }
4241
4242 if (bredr_sc_enabled(hdev)) {
4243 struct hci_cp_remote_oob_ext_data_reply cp;
4244
4245 bacpy(&cp.bdaddr, &ev->bdaddr);
4246 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4247 memset(cp.hash192, 0, sizeof(cp.hash192));
4248 memset(cp.rand192, 0, sizeof(cp.rand192));
4249 } else {
4250 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4251 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4252 }
4253 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4254 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4255
4256 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4257 sizeof(cp), &cp);
4258 } else {
4259 struct hci_cp_remote_oob_data_reply cp;
4260
4261 bacpy(&cp.bdaddr, &ev->bdaddr);
4262 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4263 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4264
4265 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4266 sizeof(cp), &cp);
4267 }
4268
4269 unlock:
4270 hci_dev_unlock(hdev);
4271 }
4272
4273 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4274 struct sk_buff *skb)
4275 {
4276 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4277 struct hci_conn *hcon, *bredr_hcon;
4278
4279 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4280 ev->status);
4281
4282 hci_dev_lock(hdev);
4283
4284 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4285 if (!hcon) {
4286 hci_dev_unlock(hdev);
4287 return;
4288 }
4289
4290 if (ev->status) {
4291 hci_conn_del(hcon);
4292 hci_dev_unlock(hdev);
4293 return;
4294 }
4295
4296 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4297
4298 hcon->state = BT_CONNECTED;
4299 bacpy(&hcon->dst, &bredr_hcon->dst);
4300
4301 hci_conn_hold(hcon);
4302 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4303 hci_conn_drop(hcon);
4304
4305 hci_debugfs_create_conn(hcon);
4306 hci_conn_add_sysfs(hcon);
4307
4308 amp_physical_cfm(bredr_hcon, hcon);
4309
4310 hci_dev_unlock(hdev);
4311 }
4312
/* Logical Link Complete event (AMP): create an hci_chan for the new
 * logical link and, if an L2CAP channel is waiting on the AMP manager,
 * confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike most handlers in this file no hci_dev_lock() is
 * taken here - presumably the hchan/amp_mgr lifetime rules make it
 * unnecessary; confirm before changing.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block based flow
		 * control MTU.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4350
4351 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4352 struct sk_buff *skb)
4353 {
4354 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4355 struct hci_chan *hchan;
4356
4357 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4358 le16_to_cpu(ev->handle), ev->status);
4359
4360 if (ev->status)
4361 return;
4362
4363 hci_dev_lock(hdev);
4364
4365 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4366 if (!hchan)
4367 goto unlock;
4368
4369 amp_destroy_logical_link(hchan, ev->reason);
4370
4371 unlock:
4372 hci_dev_unlock(hdev);
4373 }
4374
4375 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4376 struct sk_buff *skb)
4377 {
4378 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4379 struct hci_conn *hcon;
4380
4381 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4382
4383 if (ev->status)
4384 return;
4385
4386 hci_dev_lock(hdev);
4387
4388 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4389 if (hcon) {
4390 hcon->state = BT_CLOSED;
4391 hci_conn_del(hcon);
4392 }
4393
4394 hci_dev_unlock(hdev);
4395 }
4396
/* LE Connection Complete event: find or create the hci_conn, fix up
 * the initiator/responder addresses, resolve the peer's identity
 * address via its IRK, and either finalize the connection or report
 * the failure.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A pending connection attempt just completed; its
		 * timeout no longer needs to fire.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Cache the connection parameters the controller negotiated */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	hci_connect_cfm(conn, ev->status);

	/* The pending auto-connect action for this device (if any) has
	 * now completed; release the reference it held on the conn.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4532
4533 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4534 struct sk_buff *skb)
4535 {
4536 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4537 struct hci_conn *conn;
4538
4539 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4540
4541 if (ev->status)
4542 return;
4543
4544 hci_dev_lock(hdev);
4545
4546 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4547 if (conn) {
4548 conn->le_conn_interval = le16_to_cpu(ev->interval);
4549 conn->le_conn_latency = le16_to_cpu(ev->latency);
4550 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4551 }
4552
4553 hci_dev_unlock(hdev);
4554 }
4555
/* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether a pending
 * auto-connect action exists for the device and if so initiate the LE
 * connection. Returns the new hci_conn on success, NULL otherwise.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4635
4636 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4637 u8 bdaddr_type, bdaddr_t *direct_addr,
4638 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4639 {
4640 struct discovery_state *d = &hdev->discovery;
4641 struct smp_irk *irk;
4642 struct hci_conn *conn;
4643 bool match;
4644 u32 flags;
4645
4646 /* If the direct address is present, then this report is from
4647 * a LE Direct Advertising Report event. In that case it is
4648 * important to see if the address is matching the local
4649 * controller address.
4650 */
4651 if (direct_addr) {
4652 /* Only resolvable random addresses are valid for these
4653 * kind of reports and others can be ignored.
4654 */
4655 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4656 return;
4657
4658 /* If the controller is not using resolvable random
4659 * addresses, then this report can be ignored.
4660 */
4661 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4662 return;
4663
4664 /* If the local IRK of the controller does not match
4665 * with the resolvable random address provided, then
4666 * this report can be ignored.
4667 */
4668 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4669 return;
4670 }
4671
4672 /* Check if we need to convert to identity address */
4673 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4674 if (irk) {
4675 bdaddr = &irk->bdaddr;
4676 bdaddr_type = irk->addr_type;
4677 }
4678
4679 /* Check if we have been requested to connect to this device */
4680 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4681 if (conn && type == LE_ADV_IND) {
4682 /* Store report for later inclusion by
4683 * mgmt_device_connected
4684 */
4685 memcpy(conn->le_adv_data, data, len);
4686 conn->le_adv_data_len = len;
4687 }
4688
4689 /* Passive scanning shouldn't trigger any device found events,
4690 * except for devices marked as CONN_REPORT for which we do send
4691 * device found events.
4692 */
4693 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4694 if (type == LE_ADV_DIRECT_IND)
4695 return;
4696
4697 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4698 bdaddr, bdaddr_type))
4699 return;
4700
4701 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4702 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4703 else
4704 flags = 0;
4705 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4706 rssi, flags, data, len, NULL, 0);
4707 return;
4708 }
4709
4710 /* When receiving non-connectable or scannable undirected
4711 * advertising reports, this means that the remote device is
4712 * not connectable and then clearly indicate this in the
4713 * device found event.
4714 *
4715 * When receiving a scan response, then there is no way to
4716 * know if the remote device is connectable or not. However
4717 * since scan responses are merged with a previously seen
4718 * advertising report, the flags field from that report
4719 * will be used.
4720 *
4721 * In the really unlikely case that a controller get confused
4722 * and just sends a scan response event, then it is marked as
4723 * not connectable as well.
4724 */
4725 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4726 type == LE_ADV_SCAN_RSP)
4727 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4728 else
4729 flags = 0;
4730
4731 /* If there's nothing pending either store the data from this
4732 * event or send an immediate device found event if the data
4733 * should not be stored for later.
4734 */
4735 if (!has_pending_adv_report(hdev)) {
4736 /* If the report will trigger a SCAN_REQ store it for
4737 * later merging.
4738 */
4739 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4740 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4741 rssi, flags, data, len);
4742 return;
4743 }
4744
4745 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4746 rssi, flags, data, len, NULL, 0);
4747 return;
4748 }
4749
4750 /* Check if the pending report is for the same device as the new one */
4751 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4752 bdaddr_type == d->last_adv_addr_type);
4753
4754 /* If the pending data doesn't match this report or this isn't a
4755 * scan response (e.g. we got a duplicate ADV_IND) then force
4756 * sending of the pending data.
4757 */
4758 if (type != LE_ADV_SCAN_RSP || !match) {
4759 /* Send out whatever is in the cache, but skip duplicates */
4760 if (!match)
4761 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4762 d->last_adv_addr_type, NULL,
4763 d->last_adv_rssi, d->last_adv_flags,
4764 d->last_adv_data,
4765 d->last_adv_data_len, NULL, 0);
4766
4767 /* If the new report will trigger a SCAN_REQ store it for
4768 * later merging.
4769 */
4770 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4771 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4772 rssi, flags, data, len);
4773 return;
4774 }
4775
4776 /* The advertising reports cannot be merged, so clear
4777 * the pending report and send out a device found event.
4778 */
4779 clear_pending_adv_report(hdev);
4780 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4781 rssi, flags, data, len, NULL, 0);
4782 return;
4783 }
4784
4785 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4786 * the new event is a SCAN_RSP. We can therefore proceed with
4787 * sending a merged device found event.
4788 */
4789 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4790 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4791 d->last_adv_data, d->last_adv_data_len, data, len);
4792 clear_pending_adv_report(hdev);
4793 }
4794
4795 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4796 {
4797 u8 num_reports = skb->data[0];
4798 void *ptr = &skb->data[1];
4799
4800 hci_dev_lock(hdev);
4801
4802 while (num_reports--) {
4803 struct hci_ev_le_advertising_info *ev = ptr;
4804 s8 rssi;
4805
4806 rssi = ev->data[ev->length];
4807 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4808 ev->bdaddr_type, NULL, 0, rssi,
4809 ev->data, ev->length);
4810
4811 ptr += sizeof(*ev) + ev->length + 1;
4812 }
4813
4814 hci_dev_unlock(hdev);
4815 }
4816
/* Handle the LE Long Term Key Request meta event: the controller asks
 * the host for the LTK needed to encrypt the link identified by
 * ev->handle. Reply with the stored key when one matches the request's
 * EDiv/Rand, otherwise send a negative reply so the controller can
 * fail the encryption procedure.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	/* Unknown handle: nothing to encrypt, reject the request */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Record the security level this key will establish once the
	 * encryption change completes.
	 */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	/* Track the negotiated encryption key size on the connection */
	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: consume and free the key */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: tell the controller encryption cannot proceed */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4879
4880 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4881 u8 reason)
4882 {
4883 struct hci_cp_le_conn_param_req_neg_reply cp;
4884
4885 cp.handle = cpu_to_le16(handle);
4886 cp.reason = reason;
4887
4888 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4889 &cp);
4890 }
4891
4892 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4893 struct sk_buff *skb)
4894 {
4895 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4896 struct hci_cp_le_conn_param_req_reply cp;
4897 struct hci_conn *hcon;
4898 u16 handle, min, max, latency, timeout;
4899
4900 handle = le16_to_cpu(ev->handle);
4901 min = le16_to_cpu(ev->interval_min);
4902 max = le16_to_cpu(ev->interval_max);
4903 latency = le16_to_cpu(ev->latency);
4904 timeout = le16_to_cpu(ev->timeout);
4905
4906 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4907 if (!hcon || hcon->state != BT_CONNECTED)
4908 return send_conn_param_neg_reply(hdev, handle,
4909 HCI_ERROR_UNKNOWN_CONN_ID);
4910
4911 if (hci_check_conn_params(min, max, latency, timeout))
4912 return send_conn_param_neg_reply(hdev, handle,
4913 HCI_ERROR_INVALID_LL_PARAMS);
4914
4915 if (hcon->role == HCI_ROLE_MASTER) {
4916 struct hci_conn_params *params;
4917 u8 store_hint;
4918
4919 hci_dev_lock(hdev);
4920
4921 params = hci_conn_params_lookup(hdev, &hcon->dst,
4922 hcon->dst_type);
4923 if (params) {
4924 params->conn_min_interval = min;
4925 params->conn_max_interval = max;
4926 params->conn_latency = latency;
4927 params->supervision_timeout = timeout;
4928 store_hint = 0x01;
4929 } else{
4930 store_hint = 0x00;
4931 }
4932
4933 hci_dev_unlock(hdev);
4934
4935 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4936 store_hint, min, max, latency, timeout);
4937 }
4938
4939 cp.handle = ev->handle;
4940 cp.interval_min = ev->interval_min;
4941 cp.interval_max = ev->interval_max;
4942 cp.latency = ev->latency;
4943 cp.timeout = ev->timeout;
4944 cp.min_ce_len = 0;
4945 cp.max_ce_len = 0;
4946
4947 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4948 }
4949
4950 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4951 struct sk_buff *skb)
4952 {
4953 u8 num_reports = skb->data[0];
4954 void *ptr = &skb->data[1];
4955
4956 hci_dev_lock(hdev);
4957
4958 while (num_reports--) {
4959 struct hci_ev_le_direct_adv_info *ev = ptr;
4960
4961 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4962 ev->bdaddr_type, &ev->direct_addr,
4963 ev->direct_addr_type, ev->rssi, NULL, 0);
4964
4965 ptr += sizeof(*ev);
4966 }
4967
4968 hci_dev_unlock(hdev);
4969 }
4970
4971 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4972 {
4973 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4974
4975 skb_pull(skb, sizeof(*le_ev));
4976
4977 switch (le_ev->subevent) {
4978 case HCI_EV_LE_CONN_COMPLETE:
4979 hci_le_conn_complete_evt(hdev, skb);
4980 break;
4981
4982 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4983 hci_le_conn_update_complete_evt(hdev, skb);
4984 break;
4985
4986 case HCI_EV_LE_ADVERTISING_REPORT:
4987 hci_le_adv_report_evt(hdev, skb);
4988 break;
4989
4990 case HCI_EV_LE_LTK_REQ:
4991 hci_le_ltk_request_evt(hdev, skb);
4992 break;
4993
4994 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4995 hci_le_remote_conn_param_req_evt(hdev, skb);
4996 break;
4997
4998 case HCI_EV_LE_DIRECT_ADV_REPORT:
4999 hci_le_direct_adv_report_evt(hdev, skb);
5000 break;
5001
5002 default:
5003 break;
5004 }
5005 }
5006
5007 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5008 {
5009 struct hci_ev_channel_selected *ev = (void *) skb->data;
5010 struct hci_conn *hcon;
5011
5012 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5013
5014 skb_pull(skb, sizeof(*ev));
5015
5016 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5017 if (!hcon)
5018 return;
5019
5020 amp_read_loc_assoc_final_data(hdev, hcon);
5021 }
5022
/* Main HCI event demultiplexer. Called for every event packet received
 * from the controller: stashes a copy for any pending synchronous
 * request, completes the matching sent command if this event is the
 * one it was waiting for, then dispatches to the per-event handler.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for this specific event,
	 * mark the request as complete.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code; skb->data now
	 * points at the event-specific parameters.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}