]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/hci_event.c
Bluetooth: Sort switch cases by opcode's numeric value
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clear the inquiry state, wake up anyone waiting on the
 * HCI_INQUIRY bit and move discovery to the stopped state.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Connection attempts may have been deferred behind the inquiry */
	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Command complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 *
 * No state is updated here; the outcome is presumably delivered via the
 * Remote Name Request Complete event — TODO confirm against the event
 * handlers elsewhere in this file.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn)
105 conn->role = rp->role;
106
107 hci_dev_unlock(hdev);
108 }
109
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 {
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
114
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116
117 if (rp->status)
118 return;
119
120 hci_dev_lock(hdev);
121
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 if (conn)
124 conn->link_policy = __le16_to_cpu(rp->policy);
125
126 hci_dev_unlock(hdev);
127 }
128
/* Command complete handler for HCI_OP_WRITE_LINK_POLICY: on success,
 * mirror the policy we sent into the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command this reply answers */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* The policy field follows the 2-byte handle at the start
		 * of the sent command parameters.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155 {
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165
/* Command complete handler for HCI_OP_WRITE_DEF_LINK_POLICY: on
 * success, cache the default link policy we previously requested.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	/* The sent parameters start with the 16-bit policy value */
	hdev->link_policy = get_unaligned_le16(sent);
}
183
/* Command complete handler for HCI_OP_RESET.
 *
 * The controller has been reset, so drop all volatile state that the
 * previous instance may have left behind.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* HCI_RESET is cleared even on failure so another reset can be sent */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's white list was wiped by the reset as well */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
214
/* Command complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * Note there is no early return on status: mgmt must be told about
 * failures as well.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* When mgmt is in control it does the bookkeeping; otherwise cache
	 * the name we just wrote, but only on success.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
235
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
239
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
241
242 if (rp->status)
243 return;
244
245 if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
247 }
248
/* Command complete handler for HCI_OP_WRITE_AUTH_ENABLE: keep the
 * HCI_AUTH flag in sync with the value we sent and inform mgmt of the
 * outcome (success or failure).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
272
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 {
275 __u8 status = *((__u8 *) skb->data);
276 __u8 param;
277 void *sent;
278
279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
280
281 if (status)
282 return;
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
287
288 param = *((__u8 *) sent);
289
290 if (param)
291 set_bit(HCI_ENCRYPT, &hdev->flags);
292 else
293 clear_bit(HCI_ENCRYPT, &hdev->flags);
294 }
295
/* Command complete handler for HCI_OP_WRITE_SCAN_ENABLE: mirror the
 * requested scan mode into the HCI_ISCAN/HCI_PSCAN flags.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	/* On failure only the discoverable timeout is reset */
	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
330
/* Command complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte Class of Device field.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* Printed most-significant byte first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
345
/* Command complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * mgmt is notified of both success and failure; the cached class is
 * only updated on success.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
367
368 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369 {
370 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
371 __u16 setting;
372
373 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
374
375 if (rp->status)
376 return;
377
378 setting = __le16_to_cpu(rp->voice_setting);
379
380 if (hdev->voice_setting == setting)
381 return;
382
383 hdev->voice_setting = setting;
384
385 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
386
387 if (hdev->notify)
388 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
389 }
390
/* Command complete handler for HCI_OP_WRITE_VOICE_SETTING: cache the
 * setting we just wrote and notify the driver when it changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Nothing to do if the setting did not change */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
419
/* Command complete handler for HCI_OP_READ_NUM_SUPPORTED_IAC: cache
 * the number of Inquiry Access Codes the controller supports.
 */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
434
/* Command complete handler for HCI_OP_WRITE_SSP_MODE: keep the host
 * SSP feature bit and the HCI_SSP_ENABLED flag in sync with the mode
 * that was requested, and inform mgmt of the outcome.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the requested mode into the host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
462
/* Command complete handler for HCI_OP_WRITE_SC_SUPPORT: keep the host
 * Secure Connections feature bit and the HCI_SC_ENABLED flag in sync
 * with the requested value, and inform mgmt of the outcome.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		/* Mirror the requested support into the host features page */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
490
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492 {
493 struct hci_rp_read_local_version *rp = (void *) skb->data;
494
495 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496
497 if (rp->status)
498 return;
499
500 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 hdev->hci_ver = rp->hci_ver;
502 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 hdev->lmp_ver = rp->lmp_ver;
504 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
506 }
507 }
508
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
510 struct sk_buff *skb)
511 {
512 struct hci_rp_read_local_commands *rp = (void *) skb->data;
513
514 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515
516 if (rp->status)
517 return;
518
519 if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521 }
522
/* Command complete handler for HCI_OP_READ_LOCAL_FEATURES: cache page
 * 0 of the LMP features and derive the ACL/SCO/eSCO packet types the
 * controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
572
573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574 struct sk_buff *skb)
575 {
576 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579
580 if (rp->status)
581 return;
582
583 if (hdev->max_page < rp->max_page)
584 hdev->max_page = rp->max_page;
585
586 if (rp->page < HCI_MAX_PAGES)
587 memcpy(hdev->features[rp->page], rp->features, 8);
588 }
589
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 struct sk_buff *skb)
592 {
593 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596
597 if (rp->status)
598 return;
599
600 hdev->flow_ctl_mode = rp->mode;
601 }
602
/* Command complete handler for HCI_OP_READ_BUFFER_SIZE: record the
 * controller's ACL/SCO MTUs and packet counts and initialize the
 * outstanding-packet counters from them.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Controllers with this quirk need their SCO values overridden */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
628
/* Command complete handler for HCI_OP_READ_BD_ADDR: record the public
 * controller address during init and setup.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	/* Keep a separate copy of the address as seen at setup time */
	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
644
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (rp->status)
653 return;
654
655 if (test_bit(HCI_INIT, &hdev->flags)) {
656 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 hdev->page_scan_window = __le16_to_cpu(rp->window);
658 }
659 }
660
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 struct sk_buff *skb)
663 {
664 u8 status = *((u8 *) skb->data);
665 struct hci_cp_write_page_scan_activity *sent;
666
667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
668
669 if (status)
670 return;
671
672 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 if (!sent)
674 return;
675
676 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 hdev->page_scan_window = __le16_to_cpu(sent->window);
678 }
679
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 struct sk_buff *skb)
682 {
683 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684
685 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686
687 if (rp->status)
688 return;
689
690 if (test_bit(HCI_INIT, &hdev->flags))
691 hdev->page_scan_type = rp->type;
692 }
693
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 struct sk_buff *skb)
696 {
697 u8 status = *((u8 *) skb->data);
698 u8 *type;
699
700 BT_DBG("%s status 0x%2.2x", hdev->name, status);
701
702 if (status)
703 return;
704
705 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 if (type)
707 hdev->page_scan_type = *type;
708 }
709
/* Command complete handler for HCI_OP_READ_DATA_BLOCK_SIZE: record the
 * block-based flow control parameters and initialize the free block
 * counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
729
/* Command complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the original command this
 * reply carries either the local clock (stored on hdev) or a
 * connection-specific piconet clock.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated events before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requested the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
764
/* Command complete handler for HCI_OP_READ_LOCAL_AMP_INFO: cache the
 * AMP controller capabilities.
 *
 * The A2MP Get Info response is sent in both the success and the
 * failure case.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
789
/* Command complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC: assemble
 * the (possibly fragmented) local AMP assoc into hdev->loc_assoc and,
 * once complete, send the A2MP responses.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	/* NOTE(review): rem_len and frag_len are controller-supplied and
	 * accumulate into assoc->data via assoc->offset with no explicit
	 * bound check here — confirm assoc->data is sized for the maximum
	 * assoc the controller may report.
	 */
	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize length and reset the write offset */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
826
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 struct sk_buff *skb)
829 {
830 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833
834 if (rp->status)
835 return;
836
837 hdev->inq_tx_power = rp->tx_power;
838 }
839
/* Command complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * mgmt is told about the result first (including failures); on success
 * the PIN length from the sent command is recorded on the matching ACL
 * connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
867
/* Command complete handler for HCI_OP_PIN_CODE_NEG_REPLY: just forward
 * the result to mgmt when it is managing the device.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
882
/* Command complete handler for HCI_OP_LE_READ_BUFFER_SIZE: record the
 * LE ACL MTU/packet count and initialize the LE packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
900
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 if (rp->status)
909 return;
910
911 memcpy(hdev->le_features, rp->features, 8);
912 }
913
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 struct sk_buff *skb)
916 {
917 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921 if (rp->status)
922 return;
923
924 hdev->adv_tx_power = rp->tx_power;
925 }
926
/* Command complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * result to mgmt when it is managing the device.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
941
/* Command complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the result to mgmt when it is managing the device.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
957
/* Command complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * result to mgmt when it is managing the device.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
972
/* Command complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the result to mgmt when it is managing the device.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
988
/* Command complete handler for HCI_OP_READ_LOCAL_OOB_DATA: forward the
 * P-192 hash/randomizer pair to mgmt (no P-256 values in this reply,
 * hence the NULLs).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
1001
/* Command complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA: forward
 * both the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1015
1016
1017 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1018 {
1019 __u8 status = *((__u8 *) skb->data);
1020 bdaddr_t *sent;
1021
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023
1024 if (status)
1025 return;
1026
1027 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1028 if (!sent)
1029 return;
1030
1031 hci_dev_lock(hdev);
1032
1033 bacpy(&hdev->random_addr, sent);
1034
1035 hci_dev_unlock(hdev);
1036 }
1037
/* Command complete handler for HCI_OP_LE_SET_ADV_ENABLE: track the
 * advertising state in HCI_LE_ADV and, when advertising was enabled to
 * initiate a connection as peripheral, arm the connection timeout.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1072
1073 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1074 {
1075 struct hci_cp_le_set_scan_param *cp;
1076 __u8 status = *((__u8 *) skb->data);
1077
1078 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079
1080 if (status)
1081 return;
1082
1083 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1084 if (!cp)
1085 return;
1086
1087 hci_dev_lock(hdev);
1088
1089 hdev->le_scan_type = cp->type;
1090
1091 hci_dev_unlock(hdev);
1092 }
1093
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 {
1096 struct discovery_state *d = &hdev->discovery;
1097
1098 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099 }
1100
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 struct discovery_state *d = &hdev->discovery;
1104
1105 bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 d->last_adv_data_len = 0;
1107 }
1108
1109 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 u8 bdaddr_type, s8 rssi, u32 flags,
1111 u8 *data, u8 len)
1112 {
1113 struct discovery_state *d = &hdev->discovery;
1114
1115 bacpy(&d->last_adv_addr, bdaddr);
1116 d->last_adv_addr_type = bdaddr_type;
1117 d->last_adv_rssi = rssi;
1118 d->last_adv_flags = flags;
1119 memcpy(d->last_adv_data, data, len);
1120 d->last_adv_data_len = len;
1121 }
1122
/* Command complete handler for HCI_OP_LE_SET_SCAN_ENABLE: keep the
 * HCI_LE_SCAN flag, pending advertising reports and the discovery
 * state machine in sync with the scan state we just set.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans merge scan responses, so start with a
		 * clean pending-report slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1188
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1191 {
1192 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193
1194 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195
1196 if (rp->status)
1197 return;
1198
1199 hdev->le_white_list_size = rp->size;
1200 }
1201
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1204 {
1205 __u8 status = *((__u8 *) skb->data);
1206
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208
1209 if (status)
1210 return;
1211
1212 hci_bdaddr_list_clear(&hdev->le_white_list);
1213 }
1214
1215 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216 struct sk_buff *skb)
1217 {
1218 struct hci_cp_le_add_to_white_list *sent;
1219 __u8 status = *((__u8 *) skb->data);
1220
1221 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1222
1223 if (status)
1224 return;
1225
1226 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1227 if (!sent)
1228 return;
1229
1230 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1231 sent->bdaddr_type);
1232 }
1233
1234 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1236 {
1237 struct hci_cp_le_del_from_white_list *sent;
1238 __u8 status = *((__u8 *) skb->data);
1239
1240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241
1242 if (status)
1243 return;
1244
1245 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1246 if (!sent)
1247 return;
1248
1249 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1250 sent->bdaddr_type);
1251 }
1252
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1255 {
1256 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257
1258 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259
1260 if (rp->status)
1261 return;
1262
1263 memcpy(hdev->le_states, rp->le_states, 8);
1264 }
1265
1266 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1267 struct sk_buff *skb)
1268 {
1269 struct hci_cp_write_le_host_supported *sent;
1270 __u8 status = *((__u8 *) skb->data);
1271
1272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1273
1274 if (status)
1275 return;
1276
1277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1278 if (!sent)
1279 return;
1280
1281 if (sent->le) {
1282 hdev->features[1][0] |= LMP_HOST_LE;
1283 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1284 } else {
1285 hdev->features[1][0] &= ~LMP_HOST_LE;
1286 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1287 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1288 }
1289
1290 if (sent->simul)
1291 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1292 else
1293 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1294 }
1295
1296 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1297 {
1298 struct hci_cp_le_set_adv_param *cp;
1299 u8 status = *((u8 *) skb->data);
1300
1301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1302
1303 if (status)
1304 return;
1305
1306 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1307 if (!cp)
1308 return;
1309
1310 hci_dev_lock(hdev);
1311 hdev->adv_addr_type = cp->own_address_type;
1312 hci_dev_unlock(hdev);
1313 }
1314
1315 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1317 {
1318 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1319
1320 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321 hdev->name, rp->status, rp->phy_handle);
1322
1323 if (rp->status)
1324 return;
1325
1326 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1327 }
1328
1329 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1330 {
1331 struct hci_rp_read_rssi *rp = (void *) skb->data;
1332 struct hci_conn *conn;
1333
1334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1335
1336 if (rp->status)
1337 return;
1338
1339 hci_dev_lock(hdev);
1340
1341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342 if (conn)
1343 conn->rssi = rp->rssi;
1344
1345 hci_dev_unlock(hdev);
1346 }
1347
1348 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1349 {
1350 struct hci_cp_read_tx_power *sent;
1351 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352 struct hci_conn *conn;
1353
1354 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1355
1356 if (rp->status)
1357 return;
1358
1359 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360 if (!sent)
1361 return;
1362
1363 hci_dev_lock(hdev);
1364
1365 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1366 if (!conn)
1367 goto unlock;
1368
1369 switch (sent->type) {
1370 case 0x00:
1371 conn->tx_power = rp->tx_power;
1372 break;
1373 case 0x01:
1374 conn->max_tx_power = rp->tx_power;
1375 break;
1376 }
1377
1378 unlock:
1379 hci_dev_unlock(hdev);
1380 }
1381
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383 {
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385
1386 if (status) {
1387 hci_conn_check_pending(hdev);
1388 return;
1389 }
1390
1391 set_bit(HCI_INQUIRY, &hdev->flags);
1392 }
1393
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down the matching BT_CONNECT connection object —
 * except for status 0x0c ("Command Disallowed" per the HCI spec) on the
 * first couple of attempts, where the connection is parked in BT_CONNECT2
 * so it can be retried. On success with no pre-existing object (e.g. a
 * connection initiated outside the hash), create one as master.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the target address from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				/* Give up: notify upper layers and delete */
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1431
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling here (success is completed by the connection
 * complete event). The handle in the sent command refers to the parent ACL
 * link; the SCO connection hanging off it is closed and deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			/* Tell upper layers the SCO setup failed */
			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1466
1467 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1468 {
1469 struct hci_cp_auth_requested *cp;
1470 struct hci_conn *conn;
1471
1472 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1473
1474 if (!status)
1475 return;
1476
1477 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1478 if (!cp)
1479 return;
1480
1481 hci_dev_lock(hdev);
1482
1483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1484 if (conn) {
1485 if (conn->state == BT_CONFIG) {
1486 hci_proto_connect_cfm(conn, status);
1487 hci_conn_drop(conn);
1488 }
1489 }
1490
1491 hci_dev_unlock(hdev);
1492 }
1493
1494 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1495 {
1496 struct hci_cp_set_conn_encrypt *cp;
1497 struct hci_conn *conn;
1498
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1500
1501 if (!status)
1502 return;
1503
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1505 if (!cp)
1506 return;
1507
1508 hci_dev_lock(hdev);
1509
1510 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1511 if (conn) {
1512 if (conn->state == BT_CONFIG) {
1513 hci_proto_connect_cfm(conn, status);
1514 hci_conn_drop(conn);
1515 }
1516 }
1517
1518 hci_dev_unlock(hdev);
1519 }
1520
/* Decide whether an outgoing connection still needs authentication before
 * setup can complete.
 *
 * Returns 1 when authentication should be requested, 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only outgoing connections still in the configuration phase
	 * are candidates.
	 */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-level security never requires authentication */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested (bit 0 of auth_type).
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1542
/* Send an HCI Remote Name Request for the given inquiry cache entry,
 * reusing the page-scan parameters learned during inquiry.
 *
 * Returns the hci_send_cmd() result (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	/* Zero the whole command so no stale stack bytes go on the wire */
	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1557
1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1559 {
1560 struct discovery_state *discov = &hdev->discovery;
1561 struct inquiry_entry *e;
1562
1563 if (list_empty(&discov->resolve))
1564 return false;
1565
1566 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1567 if (!e)
1568 return false;
1569
1570 if (hci_resolve_name(hdev, e) == 0) {
1571 e->name_state = NAME_PENDING;
1572 return true;
1573 }
1574
1575 return false;
1576 }
1577
/* Process the outcome of a remote-name lookup for @bdaddr and advance the
 * discovery state machine. @name is NULL when the lookup failed; otherwise
 * it points at the resolved name of length @name_len.
 *
 * Also emits the (deferred) mgmt Device Connected event once a name is
 * available for a connection that was waiting on it.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First report of this connection to mgmt carries the name */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested: don't resolve further names, just finish */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found among the entries with a pending name
	 * lookup, there is no need to continue resolving the next name here:
	 * that will happen when the matching Remote Name Request Complete
	 * event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names queued? Then discovery continues in RESOLVING state */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1619
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Success is handled by the later Remote Name Request Complete event;
 * only a failed command status needs processing here: let the discovery
 * machinery know the name lookup is over, and — if the connection is
 * waiting on authentication — start it now instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report "name unknown" to the discovery/mgmt layer */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1662
1663 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1664 {
1665 struct hci_cp_read_remote_features *cp;
1666 struct hci_conn *conn;
1667
1668 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669
1670 if (!status)
1671 return;
1672
1673 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1674 if (!cp)
1675 return;
1676
1677 hci_dev_lock(hdev);
1678
1679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1680 if (conn) {
1681 if (conn->state == BT_CONFIG) {
1682 hci_proto_connect_cfm(conn, status);
1683 hci_conn_drop(conn);
1684 }
1685 }
1686
1687 hci_dev_unlock(hdev);
1688 }
1689
1690 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1691 {
1692 struct hci_cp_read_remote_ext_features *cp;
1693 struct hci_conn *conn;
1694
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1696
1697 if (!status)
1698 return;
1699
1700 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1701 if (!cp)
1702 return;
1703
1704 hci_dev_lock(hdev);
1705
1706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1707 if (conn) {
1708 if (conn->state == BT_CONFIG) {
1709 hci_proto_connect_cfm(conn, status);
1710 hci_conn_drop(conn);
1711 }
1712 }
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need handling (success completes via the sync connection
 * complete event). The handle in the sent command refers to the parent
 * ACL link; the SCO/eSCO connection hanging off it is closed and deleted.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			/* Tell upper layers the sync connection failed */
			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1751
1752 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1753 {
1754 struct hci_cp_sniff_mode *cp;
1755 struct hci_conn *conn;
1756
1757 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1758
1759 if (!status)
1760 return;
1761
1762 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1763 if (!cp)
1764 return;
1765
1766 hci_dev_lock(hdev);
1767
1768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1769 if (conn) {
1770 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1771
1772 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1773 hci_sco_setup(conn, status);
1774 }
1775
1776 hci_dev_unlock(hdev);
1777 }
1778
1779 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1780 {
1781 struct hci_cp_exit_sniff_mode *cp;
1782 struct hci_conn *conn;
1783
1784 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1785
1786 if (!status)
1787 return;
1788
1789 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1790 if (!cp)
1791 return;
1792
1793 hci_dev_lock(hdev);
1794
1795 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1796 if (conn) {
1797 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1798
1799 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1800 hci_sco_setup(conn, status);
1801 }
1802
1803 hci_dev_unlock(hdev);
1804 }
1805
1806 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1807 {
1808 struct hci_cp_disconnect *cp;
1809 struct hci_conn *conn;
1810
1811 if (!status)
1812 return;
1813
1814 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1815 if (!cp)
1816 return;
1817
1818 hci_dev_lock(hdev);
1819
1820 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1821 if (conn)
1822 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1823 conn->dst_type, status);
1824
1825 hci_dev_unlock(hdev);
1826 }
1827
1828 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1829 {
1830 struct hci_cp_create_phy_link *cp;
1831
1832 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1833
1834 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1835 if (!cp)
1836 return;
1837
1838 hci_dev_lock(hdev);
1839
1840 if (status) {
1841 struct hci_conn *hcon;
1842
1843 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1844 if (hcon)
1845 hci_conn_del(hcon);
1846 } else {
1847 amp_write_remote_assoc(hdev, cp->phy_handle);
1848 }
1849
1850 hci_dev_unlock(hdev);
1851 }
1852
1853 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1854 {
1855 struct hci_cp_accept_phy_link *cp;
1856
1857 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1858
1859 if (status)
1860 return;
1861
1862 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1863 if (!cp)
1864 return;
1865
1866 amp_write_remote_assoc(hdev, cp->phy_handle);
1867 }
1868
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information that SMP
 * will later need, and arm a connection-attempt timeout for direct (non
 * white-list) connections.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1919
/* Command Status handler for HCI_OP_LE_START_ENC.
 *
 * Only failures need handling: if the link is still up, encryption could
 * not be started, so disconnect with an authentication failure and drop
 * the reference held for the encryption attempt.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Connection already gone down some other way — nothing to do */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1949
1950 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1951 {
1952 struct hci_cp_switch_role *cp;
1953 struct hci_conn *conn;
1954
1955 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1956
1957 if (!status)
1958 return;
1959
1960 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1961 if (!cp)
1962 return;
1963
1964 hci_dev_lock(hdev);
1965
1966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1967 if (conn)
1968 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1969
1970 hci_dev_unlock(hdev);
1971 }
1972
/* Handler for the HCI Inquiry Complete event.
 *
 * Clears HCI_INQUIRY, wakes anyone waiting on that bit, and — when mgmt
 * is driving discovery — either moves on to name resolution for the
 * discovered devices or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection attempts may have been deferred while inquiring */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Without mgmt there is no discovery state machine to advance */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names for devices found during inquiry */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2013
2014 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2015 {
2016 struct inquiry_data data;
2017 struct inquiry_info *info = (void *) (skb->data + 1);
2018 int num_rsp = *((__u8 *) skb->data);
2019
2020 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2021
2022 if (!num_rsp)
2023 return;
2024
2025 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2026 return;
2027
2028 hci_dev_lock(hdev);
2029
2030 for (; num_rsp; num_rsp--, info++) {
2031 u32 flags;
2032
2033 bacpy(&data.bdaddr, &info->bdaddr);
2034 data.pscan_rep_mode = info->pscan_rep_mode;
2035 data.pscan_period_mode = info->pscan_period_mode;
2036 data.pscan_mode = info->pscan_mode;
2037 memcpy(data.dev_class, info->dev_class, 3);
2038 data.clock_offset = info->clock_offset;
2039 data.rssi = 0x00;
2040 data.ssp_mode = 0x00;
2041
2042 flags = hci_inquiry_cache_update(hdev, &data, false);
2043
2044 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2045 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2046 }
2047
2048 hci_dev_unlock(hdev);
2049 }
2050
/* Handler for the HCI Connection Complete event.
 *
 * Finishes setup of an ACL or SCO/eSCO connection: assigns the handle,
 * moves the connection to the right state, registers sysfs entries,
 * mirrors the adapter-wide auth/encrypt settings onto the link, and
 * kicks off remote-feature discovery. On error the connection object is
 * torn down and (for ACL) mgmt is told the connect failed.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* We may have requested eSCO but the controller fell back
		 * to plain SCO; retry the lookup under the other type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			/* ACL links still need feature exchange etc. */
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connection without a stored link
			 * key: keep the connection around long enough for
			 * pairing to happen.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		/* Inherit the adapter-wide auth/encrypt policy */
		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* Other connection attempts may have queued up meanwhile */
	hci_conn_check_pending(hdev);
}
2134
2135 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2136 {
2137 struct hci_cp_reject_conn_req cp;
2138
2139 bacpy(&cp.bdaddr, bdaddr);
2140 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2141 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2142 }
2143
/* Handler for the HCI Connection Request event.
 *
 * Applies the accept policy (link mode, L2CAP indication, blacklist, and
 * — when not connectable — the whitelist) and either rejects the request
 * or accepts it with the appropriate ACL or synchronous accept command.
 * When a protocol asked to defer (HCI_PROTO_DEFER) the decision is left
 * to the upper layer via connect_cfm.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always refused */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable controller: accept with sync parameters */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: upper layer decides whether to accept */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2233
2234 static u8 hci_to_mgmt_reason(u8 err)
2235 {
2236 switch (err) {
2237 case HCI_ERROR_CONNECTION_TIMEOUT:
2238 return MGMT_DEV_DISCONN_TIMEOUT;
2239 case HCI_ERROR_REMOTE_USER_TERM:
2240 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2241 case HCI_ERROR_REMOTE_POWER_OFF:
2242 return MGMT_DEV_DISCONN_REMOTE;
2243 case HCI_ERROR_LOCAL_HOST_TERM:
2244 return MGMT_DEV_DISCONN_LOCAL_HOST;
2245 default:
2246 return MGMT_DEV_DISCONN_UNKNOWN;
2247 }
2248 }
2249
/* Handler for the HCI Disconnection Complete event.
 *
 * Notifies mgmt, removes the stored link key when flagged, updates page
 * scanning, re-queues auto-connect peers for background scanning, tears
 * down the connection object, and re-enables LE advertising if the
 * dropped link had suppressed it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if it saw the connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Re-arm auto-connection for this peer where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Reconnect only on an actual link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is freed by hci_conn_del() below */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2327
/* Handler for the HCI Authentication Complete event.
 *
 * Updates the connection's auth state, notifies mgmt on failure, and —
 * depending on the connection state — either continues link setup by
 * requesting encryption (SSP) or confirms the connection/authentication
 * to the upper layers. A pending encryption request is also started or
 * failed here based on the authentication outcome.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			/* Legacy pairing cannot be re-triggered; keep the
			 * existing auth state instead of upgrading it.
			 */
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold+drop resets the disconnect timer with the new value */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Auth failed, so the queued encryption fails too */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2392
/* Handler for the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved name (or lookup failure) into the discovery state
 * machine via hci_check_pending_name(), then — for an outgoing connection
 * in configuration — starts authentication if it is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Connection attempts may have been deferred behind the request */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2434
/* Encryption Change event: update the connection's auth/encrypt flags
 * to match the controller's report and notify the upper layers.
 * On failure the link is disconnected with an authentication error.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption attempt on an established link is treated
	 * as an authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2506
2507 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2508 struct sk_buff *skb)
2509 {
2510 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2511 struct hci_conn *conn;
2512
2513 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2514
2515 hci_dev_lock(hdev);
2516
2517 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2518 if (conn) {
2519 if (!ev->status)
2520 set_bit(HCI_CONN_SECURE, &conn->flags);
2521
2522 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2523
2524 hci_key_change_cfm(conn, ev->status);
2525 }
2526
2527 hci_dev_unlock(hdev);
2528 }
2529
/* Read Remote Supported Features complete: cache page 0 of the remote
 * features, then continue connection setup — fetch extended features
 * when both sides are SSP capable, otherwise request the remote name
 * or finish the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being configured.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2577
/* Command Complete event: dispatch the return parameters to the
 * matching hci_cc_* handler by opcode, complete any pending request,
 * and restart command-queue processing if the controller reports
 * available command slots.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte after the event header is the command status for
	 * the commands handled below.
	 * NOTE(review): read before any length validation — assumes the
	 * controller always sends at least one return parameter byte.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the command timeout no longer
	 * applies (HCI_OP_NOP completes are controller-generated).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept more commands; wake
	 * the command worker unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2857
/* Command Status event: route the status to the matching hci_cs_*
 * handler by opcode, complete the pending request when appropriate,
 * and restart command-queue processing if slots are available.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent command
	 * is not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2954
2955 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2956 {
2957 struct hci_ev_hardware_error *ev = (void *) skb->data;
2958
2959 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2960 }
2961
2962 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 struct hci_ev_role_change *ev = (void *) skb->data;
2965 struct hci_conn *conn;
2966
2967 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2968
2969 hci_dev_lock(hdev);
2970
2971 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2972 if (conn) {
2973 if (!ev->status)
2974 conn->role = ev->role;
2975
2976 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2977
2978 hci_role_switch_cfm(conn, ev->status, ev->role);
2979 }
2980
2981 hci_dev_unlock(hdev);
2982 }
2983
/* Number of Completed Packets event: per-handle, credit back the
 * transmitted packet counts to the matching link-type quota and kick
 * the TX worker. Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the claimed handle entries actually fit in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the proper pool, clamped to the
		 * controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3049
3050 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3051 __u16 handle)
3052 {
3053 struct hci_chan *chan;
3054
3055 switch (hdev->dev_type) {
3056 case HCI_BREDR:
3057 return hci_conn_hash_lookup_handle(hdev, handle);
3058 case HCI_AMP:
3059 chan = hci_chan_lookup_handle(hdev, handle);
3060 if (chan)
3061 return chan->conn;
3062 break;
3063 default:
3064 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3065 break;
3066 }
3067
3068 return NULL;
3069 }
3070
/* Number of Completed Data Blocks event: per-handle, credit the freed
 * blocks back to the shared block pool and kick the TX worker. Only
 * valid in block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the claimed handle entries actually fit in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp credits to the advertised block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3120
3121 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3122 {
3123 struct hci_ev_mode_change *ev = (void *) skb->data;
3124 struct hci_conn *conn;
3125
3126 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3127
3128 hci_dev_lock(hdev);
3129
3130 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3131 if (conn) {
3132 conn->mode = ev->mode;
3133
3134 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3135 &conn->flags)) {
3136 if (conn->mode == HCI_CM_ACTIVE)
3137 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3138 else
3139 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3140 }
3141
3142 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3143 hci_sco_setup(conn, ev->status);
3144 }
3145
3146 hci_dev_unlock(hdev);
3147 }
3148
/* PIN Code Request event: extend the disconnect timeout for pairing,
 * refuse the request when the device is not bondable and we did not
 * initiate authentication, otherwise forward it to user space.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Give pairing more time before the idle disconnect fires */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Tell user space whether a 16-digit PIN is required */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3186
/* Link Key Request event: look up a stored key for the peer and reply
 * with it, unless its type is too weak for the pending security level,
 * in which case a negative reply is sent instead.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Reject unauthenticated keys when MITM protection was
		 * requested for this connection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is not acceptable
		 * for high or FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3245
/* Link Key Notification event: store the new key, notify user space,
 * and decide whether the key should persist across disconnects.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3299
3300 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3301 {
3302 struct hci_ev_clock_offset *ev = (void *) skb->data;
3303 struct hci_conn *conn;
3304
3305 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3306
3307 hci_dev_lock(hdev);
3308
3309 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3310 if (conn && !ev->status) {
3311 struct inquiry_entry *ie;
3312
3313 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3314 if (ie) {
3315 ie->data.clock_offset = ev->clock_offset;
3316 ie->timestamp = jiffies;
3317 }
3318 }
3319
3320 hci_dev_unlock(hdev);
3321 }
3322
3323 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3324 {
3325 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3326 struct hci_conn *conn;
3327
3328 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3329
3330 hci_dev_lock(hdev);
3331
3332 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3333 if (conn && !ev->status)
3334 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3335
3336 hci_dev_unlock(hdev);
3337 }
3338
3339 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3340 {
3341 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3342 struct inquiry_entry *ie;
3343
3344 BT_DBG("%s", hdev->name);
3345
3346 hci_dev_lock(hdev);
3347
3348 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3349 if (ie) {
3350 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3351 ie->timestamp = jiffies;
3352 }
3353
3354 hci_dev_unlock(hdev);
3355 }
3356
/* Inquiry Result with RSSI event: parse the responses — which come in
 * two wire formats, distinguished by record size — update the inquiry
 * cache, and report each found device to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size tells the variants apart: the non-standard one
	 * carries an extra pscan_mode field.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3420
/* Read Remote Extended Features complete: cache the requested feature
 * page, derive the remote's SSP/SC host support from page 1, and
 * continue connection setup (name request or connect confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3484
/* Synchronous Connection Complete event: finalize an (e)SCO link on
 * success, or retry with a downgraded packet type for a set of known
 * negotiation failures before giving up and closing the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * controller; fall back to the pending eSCO connection.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For these failures, retry the outgoing setup with a
		 * reduced packet type selection.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3542
3543 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3544 {
3545 size_t parsed = 0;
3546
3547 while (parsed < eir_len) {
3548 u8 field_len = eir[0];
3549
3550 if (field_len == 0)
3551 return parsed;
3552
3553 parsed += field_len + 1;
3554 eir += field_len + 1;
3555 }
3556
3557 return eir_len;
3558 }
3559
/* Extended Inquiry Result event: update the inquiry cache for each
 * response and forward the device plus its EIR data to the management
 * interface.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* Skip a separate name request if the EIR already carries
		 * the complete name (only relevant with mgmt enabled).
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3609
/* Handle HCI Encryption Key Refresh Complete: for LE links apply the
 * pending security level and notify waiters; BR/EDR links take the
 * equivalent steps via the auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* A successful refresh means the pending security level is now
	 * in effect.
	 */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* On failure of an already established link, tear it down with an
	 * authentication-failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Still configuring: promote to connected on success and
		 * let the upper protocol decide what to do next.
		 */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold the link for the disconnect timeout window */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3659
3660 static u8 hci_get_auth_req(struct hci_conn *conn)
3661 {
3662 /* If remote requests no-bonding follow that lead */
3663 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3664 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3665 return conn->remote_auth | (conn->auth_type & 0x01);
3666
3667 /* If both remote and local have enough IO capabilities, require
3668 * MITM protection
3669 */
3670 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3671 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3672 return conn->remote_auth | 0x01;
3673
3674 /* No MITM protection possible so ignore remote requirement */
3675 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3676 }
3677
/* Handle HCI IO Capability Request: reply with our IO capability and
 * authentication requirements when pairing is allowed, otherwise send
 * a negative reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference on the connection for the pairing procedure */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have remote OOB
		 * data and either initiated the connection or saw the remote
		 * signal OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3751
3752 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3753 {
3754 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3755 struct hci_conn *conn;
3756
3757 BT_DBG("%s", hdev->name);
3758
3759 hci_dev_lock(hdev);
3760
3761 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3762 if (!conn)
3763 goto unlock;
3764
3765 conn->remote_cap = ev->capability;
3766 conn->remote_auth = ev->authentication;
3767 if (ev->oob_data)
3768 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3769
3770 unlock:
3771 hci_dev_unlock(hdev);
3772 }
3773
/* Handle HCI User Confirmation Request: depending on the MITM
 * requirements of both sides either reject, auto-accept (possibly
 * delayed), or forward the request to user space for confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept via a delayed work
		 * item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Hand the decision to user space via the management interface */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3848
3849 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3850 struct sk_buff *skb)
3851 {
3852 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3853
3854 BT_DBG("%s", hdev->name);
3855
3856 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3857 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3858 }
3859
3860 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3861 struct sk_buff *skb)
3862 {
3863 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3864 struct hci_conn *conn;
3865
3866 BT_DBG("%s", hdev->name);
3867
3868 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3869 if (!conn)
3870 return;
3871
3872 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3873 conn->passkey_entered = 0;
3874
3875 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3876 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3877 conn->dst_type, conn->passkey_notify,
3878 conn->passkey_entered);
3879 }
3880
3881 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3882 {
3883 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3884 struct hci_conn *conn;
3885
3886 BT_DBG("%s", hdev->name);
3887
3888 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3889 if (!conn)
3890 return;
3891
3892 switch (ev->type) {
3893 case HCI_KEYPRESS_STARTED:
3894 conn->passkey_entered = 0;
3895 return;
3896
3897 case HCI_KEYPRESS_ENTERED:
3898 conn->passkey_entered++;
3899 break;
3900
3901 case HCI_KEYPRESS_ERASED:
3902 conn->passkey_entered--;
3903 break;
3904
3905 case HCI_KEYPRESS_CLEARED:
3906 conn->passkey_entered = 0;
3907 break;
3908
3909 case HCI_KEYPRESS_COMPLETED:
3910 return;
3911 }
3912
3913 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3914 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3915 conn->dst_type, conn->passkey_notify,
3916 conn->passkey_entered);
3917 }
3918
3919 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3920 struct sk_buff *skb)
3921 {
3922 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3923 struct hci_conn *conn;
3924
3925 BT_DBG("%s", hdev->name);
3926
3927 hci_dev_lock(hdev);
3928
3929 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3930 if (!conn)
3931 goto unlock;
3932
3933 /* Reset the authentication requirement to unknown */
3934 conn->remote_auth = 0xff;
3935
3936 /* To avoid duplicate auth_failed events to user space we check
3937 * the HCI_CONN_AUTH_PEND flag which will be set if we
3938 * initiated the authentication. A traditional auth_complete
3939 * event gets always produced as initiator and is also mapped to
3940 * the mgmt_auth_failed event */
3941 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3942 mgmt_auth_failed(conn, ev->status);
3943
3944 hci_conn_drop(conn);
3945
3946 unlock:
3947 hci_dev_unlock(hdev);
3948 }
3949
3950 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3951 struct sk_buff *skb)
3952 {
3953 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3954 struct inquiry_entry *ie;
3955 struct hci_conn *conn;
3956
3957 BT_DBG("%s", hdev->name);
3958
3959 hci_dev_lock(hdev);
3960
3961 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3962 if (conn)
3963 memcpy(conn->features[1], ev->features, 8);
3964
3965 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3966 if (ie)
3967 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3968
3969 hci_dev_unlock(hdev);
3970 }
3971
/* Handle HCI Remote OOB Data Request: reply with locally stored OOB
 * data for the remote device (extended form when Secure Connections is
 * enabled), or send a negative reply if we have none.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: reply with both the P-192 and
			 * P-256 hash/randomizer pairs.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: only the P-192 values are used */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		/* No stored OOB data for this device */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4022
4023 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4024 struct sk_buff *skb)
4025 {
4026 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4027 struct hci_conn *hcon, *bredr_hcon;
4028
4029 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4030 ev->status);
4031
4032 hci_dev_lock(hdev);
4033
4034 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4035 if (!hcon) {
4036 hci_dev_unlock(hdev);
4037 return;
4038 }
4039
4040 if (ev->status) {
4041 hci_conn_del(hcon);
4042 hci_dev_unlock(hdev);
4043 return;
4044 }
4045
4046 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4047
4048 hcon->state = BT_CONNECTED;
4049 bacpy(&hcon->dst, &bredr_hcon->dst);
4050
4051 hci_conn_hold(hcon);
4052 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4053 hci_conn_drop(hcon);
4054
4055 hci_conn_add_sysfs(hcon);
4056
4057 amp_physical_cfm(bredr_hcon, hcon);
4058
4059 hci_dev_unlock(hdev);
4060 }
4061
/* Handle AMP Logical Link Complete: create the HCI channel for the new
 * logical link and, if an A2MP-managed L2CAP channel is waiting on it,
 * confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's block
		 * MTU and confirm the logical link; the extra hold keeps
		 * the physical link alive while L2CAP uses it.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4099
4100 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4101 struct sk_buff *skb)
4102 {
4103 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4104 struct hci_chan *hchan;
4105
4106 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4107 le16_to_cpu(ev->handle), ev->status);
4108
4109 if (ev->status)
4110 return;
4111
4112 hci_dev_lock(hdev);
4113
4114 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4115 if (!hchan)
4116 goto unlock;
4117
4118 amp_destroy_logical_link(hchan, ev->reason);
4119
4120 unlock:
4121 hci_dev_unlock(hdev);
4122 }
4123
4124 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4125 struct sk_buff *skb)
4126 {
4127 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4128 struct hci_conn *hcon;
4129
4130 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4131
4132 if (ev->status)
4133 return;
4134
4135 hci_dev_lock(hdev);
4136
4137 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4138 if (hcon) {
4139 hcon->state = BT_CLOSED;
4140 hci_conn_del(hcon);
4141 }
4142
4143 hci_dev_unlock(hdev);
4144 }
4145
/* Handle LE Connection Complete: create or finalize the hci_conn for
 * the new LE link, resolve RPAs to identity addresses, reject blocked
 * devices and notify mgmt/L2CAP of the result.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A pending connection attempt succeeded or failed, so the
		 * connection timeout guard is no longer needed.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Record the negotiated connection parameters */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending auto-connect action is done; release the extra
	 * reference that a parameter-triggered connect took.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4280
4281 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4282 struct sk_buff *skb)
4283 {
4284 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4285 struct hci_conn *conn;
4286
4287 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4288
4289 if (ev->status)
4290 return;
4291
4292 hci_dev_lock(hdev);
4293
4294 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4295 if (conn) {
4296 conn->le_conn_interval = le16_to_cpu(ev->interval);
4297 conn->le_conn_latency = le16_to_cpu(ev->latency);
4298 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4299 }
4300
4301 hci_dev_unlock(hdev);
4302 }
4303
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on the stored connection parameters for this address,
 * whether an incoming advertisement should trigger an LE connection
 * attempt. Returns the new connection, or NULL when no attempt is made
 * (or the attempt failed for a reason other than one already being in
 * progress).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4383
/* Process one LE advertising report: resolve the address, possibly
 * trigger a pending auto-connection, and emit (or buffer for merging
 * with a later scan response) a mgmt device-found event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4515
4516 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4517 {
4518 u8 num_reports = skb->data[0];
4519 void *ptr = &skb->data[1];
4520
4521 hci_dev_lock(hdev);
4522
4523 while (num_reports--) {
4524 struct hci_ev_le_advertising_info *ev = ptr;
4525 s8 rssi;
4526
4527 rssi = ev->data[ev->length];
4528 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4529 ev->bdaddr_type, rssi, ev->data, ev->length);
4530
4531 ptr += sizeof(*ev) + ev->length + 1;
4532 }
4533
4534 hci_dev_unlock(hdev);
4535 }
4536
/* Handle LE Long Term Key Request: look up a matching LTK for the
 * connection and reply with it (discarding one-time STKs after use),
 * or send a negative reply if no key is known.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level takes effect once encryption completes */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4589
4590 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4591 u8 reason)
4592 {
4593 struct hci_cp_le_conn_param_req_neg_reply cp;
4594
4595 cp.handle = cpu_to_le16(handle);
4596 cp.reason = reason;
4597
4598 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4599 &cp);
4600 }
4601
4602 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4603 struct sk_buff *skb)
4604 {
4605 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4606 struct hci_cp_le_conn_param_req_reply cp;
4607 struct hci_conn *hcon;
4608 u16 handle, min, max, latency, timeout;
4609
4610 handle = le16_to_cpu(ev->handle);
4611 min = le16_to_cpu(ev->interval_min);
4612 max = le16_to_cpu(ev->interval_max);
4613 latency = le16_to_cpu(ev->latency);
4614 timeout = le16_to_cpu(ev->timeout);
4615
4616 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4617 if (!hcon || hcon->state != BT_CONNECTED)
4618 return send_conn_param_neg_reply(hdev, handle,
4619 HCI_ERROR_UNKNOWN_CONN_ID);
4620
4621 if (hci_check_conn_params(min, max, latency, timeout))
4622 return send_conn_param_neg_reply(hdev, handle,
4623 HCI_ERROR_INVALID_LL_PARAMS);
4624
4625 if (hcon->role == HCI_ROLE_MASTER) {
4626 struct hci_conn_params *params;
4627 u8 store_hint;
4628
4629 hci_dev_lock(hdev);
4630
4631 params = hci_conn_params_lookup(hdev, &hcon->dst,
4632 hcon->dst_type);
4633 if (params) {
4634 params->conn_min_interval = min;
4635 params->conn_max_interval = max;
4636 params->conn_latency = latency;
4637 params->supervision_timeout = timeout;
4638 store_hint = 0x01;
4639 } else{
4640 store_hint = 0x00;
4641 }
4642
4643 hci_dev_unlock(hdev);
4644
4645 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4646 store_hint, min, max, latency, timeout);
4647 }
4648
4649 cp.handle = ev->handle;
4650 cp.interval_min = ev->interval_min;
4651 cp.interval_max = ev->interval_max;
4652 cp.latency = ev->latency;
4653 cp.timeout = ev->timeout;
4654 cp.min_ce_len = 0;
4655 cp.max_ce_len = 0;
4656
4657 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4658 }
4659
/* Handle the LE Meta event: strip the meta-event header and dispatch
 * to the sub-event handler; unknown sub-events are ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Advance past the header so handlers see their own payload */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	default:
		break;
	}
}
4691
4692 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4693 {
4694 struct hci_ev_channel_selected *ev = (void *) skb->data;
4695 struct hci_conn *hcon;
4696
4697 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4698
4699 skb_pull(skb, sizeof(*ev));
4700
4701 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4702 if (!hcon)
4703 return;
4704
4705 amp_read_loc_assoc_final_data(hdev, hcon);
4706 }
4707
/* Top-level HCI event dispatcher. Called with a complete event packet;
 * clones it for a pending request if needed, completes the matching sent
 * command, then routes the payload to the per-event handler. Consumes
 * the skb and bumps the received-event statistics counter.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Drop the event header; handlers see only the event payload */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If this event is the one the last sent command was waiting for,
	 * mark that command's request as complete.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}