]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - net/bluetooth/hci_event.c
Bluetooth: Introduce "New Connection Parameter" Event
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clears the HCI_INQUIRY flag, wakes any waiters sleeping on
 * that bit, marks discovery as stopped and kicks off connection attempts
 * that were deferred while the inquiry was in progress.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing is updated here beyond tracing; presumably the outcome is
 * handled via the Remote Name Request Complete event path instead.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * Updates the cached role of the connection identified by the returned
 * handle: role value 0x00 sets HCI_CONN_MASTER, any other value clears it.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			clear_bit(HCI_CONN_MASTER, &conn->flags);
		else
			set_bit(HCI_CONN_MASTER, &conn->flags);
	}

	hci_dev_unlock(hdev);
}
113
/* Command Complete handler for HCI_OP_READ_LINK_POLICY: cache the link
 * policy reported for the connection matching the returned handle.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
132
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, mirrors the policy value from the original command
 * parameters into the connection. The policy field sits two bytes into
 * the command payload (after the 16-bit handle), hence sent + 2.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184 }
185
/* Command Complete handler for HCI_OP_RESET.
 *
 * The status is only logged; the stack-side state is reset
 * unconditionally: all non-persistent flags, discovery state, cached TX
 * power levels, advertising and scan response data, LE scan type and
 * SSP debug mode go back to their defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
211
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the device is mgmt-controlled the result (including failures) is
 * reported to userspace; otherwise the name from the sent command is
 * cached locally on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
232
233 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
234 {
235 struct hci_rp_read_local_name *rp = (void *) skb->data;
236
237 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
238
239 if (rp->status)
240 return;
241
242 if (test_bit(HCI_SETUP, &hdev->dev_flags))
243 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
244 }
245
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, syncs the HCI_AUTH flag with the parameter that was sent;
 * mgmt-controlled devices are additionally notified of the result
 * (success or failure).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
269
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE: on success,
 * sync the HCI_ENCRYPT flag with the mode that was sent.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}
}
290
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Syncs HCI_ISCAN/HCI_PSCAN with the scan mode that was written and
 * notifies mgmt about discoverable/connectable transitions. On failure,
 * userspace is informed and the discoverable timeout is cleared.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	/* test_and_clear so that transitions can be detected below. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
338
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342
343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344
345 if (rp->status)
346 return;
347
348 memcpy(hdev->dev_class, rp->dev_class, 3);
349
350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, caches the class that was written; mgmt-controlled
 * devices are additionally told about the outcome either way.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
375
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 *
 * Caches the reported voice setting and, when it actually changed,
 * notifies the driver so it can reconfigure SCO routing.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Nothing to do if the value did not change. */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
398
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 *
 * On success, mirrors the setting from the sent command parameters and
 * notifies the driver if the value changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* Nothing to do if the value did not change. */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
427
428 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
429 struct sk_buff *skb)
430 {
431 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
432
433 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
434
435 if (rp->status)
436 return;
437
438 hdev->num_iac = rp->num_iac;
439
440 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
441 }
442
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, syncs the host SSP feature bit with the mode that was
 * sent. Mgmt-controlled devices get the result reported to userspace;
 * otherwise HCI_SSP_ENABLED is toggled directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
470
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Mirrors hci_cc_write_ssp_mode for the Secure Connections host
 * feature: sync the LMP_HOST_SC feature bit on success and report to
 * mgmt or toggle HCI_SC_ENABLED directly.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
498
499 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
500 {
501 struct hci_rp_read_local_version *rp = (void *) skb->data;
502
503 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
504
505 if (rp->status)
506 return;
507
508 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
509 hdev->hci_ver = rp->hci_ver;
510 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
511 hdev->lmp_ver = rp->lmp_ver;
512 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
513 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
514 }
515 }
516
517 static void hci_cc_read_local_commands(struct hci_dev *hdev,
518 struct sk_buff *skb)
519 {
520 struct hci_rp_read_local_commands *rp = (void *) skb->data;
521
522 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
523
524 if (rp->status)
525 return;
526
527 if (test_bit(HCI_SETUP, &hdev->dev_flags))
528 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
529 }
530
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches page 0 of the LMP feature mask and derives the default ACL
 * packet types and the supported SCO/eSCO air modes from the
 * individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
580
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * Records the highest feature page the controller advertises and
 * stores the returned page, provided we have storage for it.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
597
598 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
599 struct sk_buff *skb)
600 {
601 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
602
603 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
604
605 if (!rp->status)
606 hdev->flow_ctl_mode = rp->mode;
607 }
608
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the ACL/SCO MTUs and packet counts and initializes the
 * available-packet counters. Controllers with the FIXUP_BUFFER_SIZE
 * quirk report bogus SCO values, so sane defaults are forced.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
634
635 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
636 {
637 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
638
639 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
640
641 if (!rp->status)
642 bacpy(&hdev->bdaddr, &rp->bdaddr);
643 }
644
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
653 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
654 hdev->page_scan_window = __le16_to_cpu(rp->window);
655 }
656 }
657
658 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
659 struct sk_buff *skb)
660 {
661 u8 status = *((u8 *) skb->data);
662 struct hci_cp_write_page_scan_activity *sent;
663
664 BT_DBG("%s status 0x%2.2x", hdev->name, status);
665
666 if (status)
667 return;
668
669 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
670 if (!sent)
671 return;
672
673 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
674 hdev->page_scan_window = __le16_to_cpu(sent->window);
675 }
676
677 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
678 struct sk_buff *skb)
679 {
680 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
681
682 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
683
684 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
685 hdev->page_scan_type = rp->type;
686 }
687
688 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
689 struct sk_buff *skb)
690 {
691 u8 status = *((u8 *) skb->data);
692 u8 *type;
693
694 BT_DBG("%s status 0x%2.2x", hdev->name, status);
695
696 if (status)
697 return;
698
699 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
700 if (type)
701 hdev->page_scan_type = *type;
702 }
703
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Caches the block-based flow control parameters and initializes the
 * available-block counter to the reported total.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
723
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * A "which" of 0x00 in the original command means the local clock was
 * requested and is stored on the device; otherwise the clock and its
 * accuracy are stored on the connection matching the returned handle.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated events before touching rp fields. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
758
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Caches the AMP controller capabilities on success. The A2MP Get Info
 * response is sent in both the success and failure cases.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
783
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC.
 *
 * Reassembles the local AMP assoc from possibly multiple fragments.
 * While rem_len exceeds the bytes carried in this event, the fragment
 * is appended and the next one requested; once the final fragment
 * arrives (or on error) the A2MP responses are sent.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize length and reset offset for reuse. */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
820
821 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
822 struct sk_buff *skb)
823 {
824 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
825
826 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
827
828 if (!rp->status)
829 hdev->inq_tx_power = rp->tx_power;
830 }
831
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Reports the outcome to mgmt (when enabled) and, on success, records
 * the PIN length from the sent command on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
859
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: just forward
 * the outcome to mgmt when userspace control is enabled.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
874
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Caches the LE ACL MTU and packet count and initializes the
 * available-packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
892
893 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
894 struct sk_buff *skb)
895 {
896 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
897
898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899
900 if (!rp->status)
901 memcpy(hdev->le_features, rp->features, 8);
902 }
903
904 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
905 struct sk_buff *skb)
906 {
907 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
908
909 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
910
911 if (!rp->status)
912 hdev->adv_tx_power = rp->tx_power;
913 }
914
/* Command Complete handler for User Confirmation Request Reply: report
 * the outcome to mgmt when userspace control is enabled.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
929
/* Command Complete handler for User Confirmation Request Negative
 * Reply: report the outcome to mgmt when userspace control is enabled.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
945
/* Command Complete handler for User Passkey Request Reply: report the
 * outcome to mgmt when userspace control is enabled.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
960
/* Command Complete handler for User Passkey Request Negative Reply:
 * report the outcome to mgmt when userspace control is enabled.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
976
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA: pass the
 * P-192 hash/randomizer to mgmt (no P-256 values for this command).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
989
/* Command Complete handler for the extended OOB data read: pass both
 * the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1003
1004
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR: on success,
 * cache the random address taken from the sent command parameters.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status)
		bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1023
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, arms the LE connection timeout when advertising was
 * enabled while a peripheral-side connection attempt is pending, and
 * notifies mgmt about the new advertising state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	if (status)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1056
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM: on success,
 * cache the scan type (active/passive) from the sent command.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (!status)
		hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1075
1076 static bool has_pending_adv_report(struct hci_dev *hdev)
1077 {
1078 struct discovery_state *d = &hdev->discovery;
1079
1080 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1081 }
1082
1083 static void clear_pending_adv_report(struct hci_dev *hdev)
1084 {
1085 struct discovery_state *d = &hdev->discovery;
1086
1087 bacpy(&d->last_adv_addr, BDADDR_ANY);
1088 d->last_adv_data_len = 0;
1089 }
1090
1091 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1092 u8 bdaddr_type, s8 rssi, u32 flags,
1093 u8 *data, u8 len)
1094 {
1095 struct discovery_state *d = &hdev->discovery;
1096
1097 bacpy(&d->last_adv_addr, bdaddr);
1098 d->last_adv_addr_type = bdaddr_type;
1099 d->last_adv_rssi = rssi;
1100 d->last_adv_flags = flags;
1101 memcpy(d->last_adv_data, data, len);
1102 d->last_adv_data_len = len;
1103 }
1104
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Tracks the HCI_LE_SCAN flag according to the enable value that was
 * sent. When scanning stops, any buffered advertising report is
 * flushed to mgmt, the scan-disable timer is cancelled, and discovery
 * is marked stopped if scanning was interrupted by a connect request.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	if (status)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1162
1163 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1164 struct sk_buff *skb)
1165 {
1166 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1167
1168 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1169
1170 if (!rp->status)
1171 hdev->le_white_list_size = rp->size;
1172 }
1173
1174 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1175 struct sk_buff *skb)
1176 {
1177 __u8 status = *((__u8 *) skb->data);
1178
1179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1180
1181 if (!status)
1182 hci_white_list_clear(hdev);
1183 }
1184
1185 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1186 struct sk_buff *skb)
1187 {
1188 struct hci_cp_le_add_to_white_list *sent;
1189 __u8 status = *((__u8 *) skb->data);
1190
1191 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1192
1193 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1194 if (!sent)
1195 return;
1196
1197 if (!status)
1198 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1199 }
1200
1201 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1202 struct sk_buff *skb)
1203 {
1204 struct hci_cp_le_del_from_white_list *sent;
1205 __u8 status = *((__u8 *) skb->data);
1206
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208
1209 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1210 if (!sent)
1211 return;
1212
1213 if (!status)
1214 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1215 }
1216
1217 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1218 struct sk_buff *skb)
1219 {
1220 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1221
1222 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1223
1224 if (!rp->status)
1225 memcpy(hdev->le_states, rp->le_states, 8);
1226 }
1227
1228 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1229 struct sk_buff *skb)
1230 {
1231 struct hci_cp_write_le_host_supported *sent;
1232 __u8 status = *((__u8 *) skb->data);
1233
1234 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1235
1236 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1237 if (!sent)
1238 return;
1239
1240 if (!status) {
1241 if (sent->le) {
1242 hdev->features[1][0] |= LMP_HOST_LE;
1243 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1244 } else {
1245 hdev->features[1][0] &= ~LMP_HOST_LE;
1246 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1247 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1248 }
1249
1250 if (sent->simul)
1251 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1252 else
1253 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1254 }
1255 }
1256
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM: remember which own
 * address type advertising was configured with, so subsequent code can tell
 * whether the advertising address is the public or the random one.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters from the command we originally sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1275
/* Command Complete handler for HCI_OP_WRITE_REMOTE_AMP_ASSOC: on success,
 * continue writing the (possibly fragmented) AMP association data for the
 * reported physical link handle.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1289
1290 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1291 {
1292 struct hci_rp_read_rssi *rp = (void *) skb->data;
1293 struct hci_conn *conn;
1294
1295 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1296
1297 if (rp->status)
1298 return;
1299
1300 hci_dev_lock(hdev);
1301
1302 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1303 if (conn)
1304 conn->rssi = rp->rssi;
1305
1306 hci_dev_unlock(hdev);
1307 }
1308
/* Command Complete handler for HCI_OP_READ_TX_POWER: store the reported
 * transmit power level on the matching connection. The type field of the
 * command we sent selects which level was requested (0x00 stores into
 * tx_power, 0x01 into max_tx_power).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Need the original command to know which power level was read */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1342
1343 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1344 {
1345 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1346
1347 if (status) {
1348 hci_conn_check_pending(hdev);
1349 return;
1350 }
1351
1352 set_bit(HCI_INQUIRY, &hdev->flags);
1353 }
1354
1355 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1356 {
1357 struct hci_cp_create_conn *cp;
1358 struct hci_conn *conn;
1359
1360 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1361
1362 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1363 if (!cp)
1364 return;
1365
1366 hci_dev_lock(hdev);
1367
1368 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1369
1370 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1371
1372 if (status) {
1373 if (conn && conn->state == BT_CONNECT) {
1374 if (status != 0x0c || conn->attempt > 2) {
1375 conn->state = BT_CLOSED;
1376 hci_proto_connect_cfm(conn, status);
1377 hci_conn_del(conn);
1378 } else
1379 conn->state = BT_CONNECT2;
1380 }
1381 } else {
1382 if (!conn) {
1383 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1384 if (conn) {
1385 conn->out = true;
1386 set_bit(HCI_CONN_MASTER, &conn->flags);
1387 } else
1388 BT_ERR("No memory for new connection");
1389 }
1390 }
1391
1392 hci_dev_unlock(hdev);
1393 }
1394
/* Command Status handler for HCI_OP_ADD_SCO. Only failures matter here:
 * look up the parent ACL connection by the handle we sent and clean up
 * the SCO link object hanging off it.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			/* Notify upper layers of the failure, then delete */
			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1429
1430 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1431 {
1432 struct hci_cp_auth_requested *cp;
1433 struct hci_conn *conn;
1434
1435 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1436
1437 if (!status)
1438 return;
1439
1440 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1441 if (!cp)
1442 return;
1443
1444 hci_dev_lock(hdev);
1445
1446 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1447 if (conn) {
1448 if (conn->state == BT_CONFIG) {
1449 hci_proto_connect_cfm(conn, status);
1450 hci_conn_drop(conn);
1451 }
1452 }
1453
1454 hci_dev_unlock(hdev);
1455 }
1456
1457 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1458 {
1459 struct hci_cp_set_conn_encrypt *cp;
1460 struct hci_conn *conn;
1461
1462 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1463
1464 if (!status)
1465 return;
1466
1467 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1468 if (!cp)
1469 return;
1470
1471 hci_dev_lock(hdev);
1472
1473 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1474 if (conn) {
1475 if (conn->state == BT_CONFIG) {
1476 hci_proto_connect_cfm(conn, status);
1477 hci_conn_drop(conn);
1478 }
1479 }
1480
1481 hci_dev_unlock(hdev);
1482 }
1483
1484 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1485 struct hci_conn *conn)
1486 {
1487 if (conn->state != BT_CONFIG || !conn->out)
1488 return 0;
1489
1490 if (conn->pending_sec_level == BT_SECURITY_SDP)
1491 return 0;
1492
1493 /* Only request authentication for SSP connections or non-SSP
1494 * devices with sec_level MEDIUM or HIGH or if MITM protection
1495 * is requested.
1496 */
1497 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1498 conn->pending_sec_level != BT_SECURITY_FIPS &&
1499 conn->pending_sec_level != BT_SECURITY_HIGH &&
1500 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1501 return 0;
1502
1503 return 1;
1504 }
1505
/* Send a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters recorded during inquiry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1520
1521 static bool hci_resolve_next_name(struct hci_dev *hdev)
1522 {
1523 struct discovery_state *discov = &hdev->discovery;
1524 struct inquiry_entry *e;
1525
1526 if (list_empty(&discov->resolve))
1527 return false;
1528
1529 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1530 if (!e)
1531 return false;
1532
1533 if (hci_resolve_name(hdev, e) == 0) {
1534 e->name_state = NAME_PENDING;
1535 return true;
1536 }
1537
1538 return false;
1539 }
1540
/* Process the outcome of a remote name lookup for @bdaddr.
 *
 * If @conn just became mgmt-visible, report the device as connected
 * (including any resolved name). Then, while discovery is in its
 * name-resolving phase, record the result in the inquiry cache entry
 * and either start resolving the next pending name or mark discovery
 * as stopped.
 *
 * @name/@name_len: the resolved remote name, or NULL/0 when resolution
 * failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name:
	 * that will be done upon receiving another Remote Name Request
	 * Complete event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1583
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ. On success the name
 * arrives later in a Remote Name Request Complete event, so only failures
 * are handled here: flush any pending mgmt name lookup and, if the
 * connection was waiting on this request, start the deferred
 * authentication instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful, wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (NULL name) to the mgmt layer */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1624
1625 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1626 {
1627 struct hci_cp_read_remote_features *cp;
1628 struct hci_conn *conn;
1629
1630 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1631
1632 if (!status)
1633 return;
1634
1635 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1636 if (!cp)
1637 return;
1638
1639 hci_dev_lock(hdev);
1640
1641 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1642 if (conn) {
1643 if (conn->state == BT_CONFIG) {
1644 hci_proto_connect_cfm(conn, status);
1645 hci_conn_drop(conn);
1646 }
1647 }
1648
1649 hci_dev_unlock(hdev);
1650 }
1651
1652 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1653 {
1654 struct hci_cp_read_remote_ext_features *cp;
1655 struct hci_conn *conn;
1656
1657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1658
1659 if (!status)
1660 return;
1661
1662 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1663 if (!cp)
1664 return;
1665
1666 hci_dev_lock(hdev);
1667
1668 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1669 if (conn) {
1670 if (conn->state == BT_CONFIG) {
1671 hci_proto_connect_cfm(conn, status);
1672 hci_conn_drop(conn);
1673 }
1674 }
1675
1676 hci_dev_unlock(hdev);
1677 }
1678
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN. Only failures matter
 * here: look up the parent ACL connection by the handle we sent and clean
 * up the SCO/eSCO link object hanging off it.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			/* Notify upper layers of the failure, then delete */
			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1713
1714 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1715 {
1716 struct hci_cp_sniff_mode *cp;
1717 struct hci_conn *conn;
1718
1719 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1720
1721 if (!status)
1722 return;
1723
1724 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1725 if (!cp)
1726 return;
1727
1728 hci_dev_lock(hdev);
1729
1730 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1731 if (conn) {
1732 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1733
1734 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1735 hci_sco_setup(conn, status);
1736 }
1737
1738 hci_dev_unlock(hdev);
1739 }
1740
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE. Only failures are
 * processed: clear the pending mode-change flag and, when a SCO setup was
 * deferred behind the mode change, run it with the error status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1767
1768 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1769 {
1770 struct hci_cp_disconnect *cp;
1771 struct hci_conn *conn;
1772
1773 if (!status)
1774 return;
1775
1776 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1777 if (!cp)
1778 return;
1779
1780 hci_dev_lock(hdev);
1781
1782 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1783 if (conn)
1784 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1785 conn->dst_type, status);
1786
1787 hci_dev_unlock(hdev);
1788 }
1789
/* Command Status handler for HCI_OP_CREATE_PHY_LINK: on failure, delete
 * the connection object created for the AMP physical link; on success,
 * start writing the local AMP association to the remote controller.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1814
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK: on success, start
 * writing the local AMP association for the accepted physical link.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}
1830
/* Command Status handler for HCI_OP_LE_CREATE_CONN. Only the success path
 * is handled here (failures are handled via hci_le_conn_failed through the
 * request callbacks): record the initiator/responder addresses for SMP and
 * arm a connection timeout for directed (non white list) connects.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1881
/* Command Status handler for HCI_OP_LE_START_ENC. Only failures are
 * handled: if the link is still connected, encryption could not be
 * started, so disconnect with an authentication failure and drop the
 * reference held for the encryption attempt.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1911
/* Handle the HCI Inquiry Complete event: clear the inquiry flag, wake any
 * waiters, and — when mgmt-driven discovery is active — either move on to
 * resolving names of discovered devices or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* Nothing left to resolve; discovery is done */
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1952
/* Handle the HCI Inquiry Result event: for each response, update the
 * inquiry cache and report the device to the mgmt layer. The first byte
 * of the event is the response count, followed by that many
 * inquiry_info records.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are not reported here */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP information */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
1989
/* Handle the HCI Connection Complete event: bind the new handle to the
 * matching connection object, perform post-connect setup (remote feature
 * read, packet type for old controllers), and notify upper layers. On
 * failure the connection object is torn down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO completion may answer an eSCO request when the
		 * controller fell back to SCO; retag the connection.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after remote features are read */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2071
/* Handle the HCI Connection Request event: ask the protocol layers and the
 * blacklist whether to accept the incoming connection, then either accept
 * it (ACL or sync variant), defer the decision to the protocol layer, or
 * reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we saw it in inquiry */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Protocol layer asked to defer; it will confirm */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2153
2154 static u8 hci_to_mgmt_reason(u8 err)
2155 {
2156 switch (err) {
2157 case HCI_ERROR_CONNECTION_TIMEOUT:
2158 return MGMT_DEV_DISCONN_TIMEOUT;
2159 case HCI_ERROR_REMOTE_USER_TERM:
2160 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2161 case HCI_ERROR_REMOTE_POWER_OFF:
2162 return MGMT_DEV_DISCONN_REMOTE;
2163 case HCI_ERROR_LOCAL_HOST_TERM:
2164 return MGMT_DEV_DISCONN_LOCAL_HOST;
2165 default:
2166 return MGMT_DEV_DISCONN_UNKNOWN;
2167 }
2168 }
2169
/* Handle the HCI Disconnection Complete event: notify mgmt, clean up link
 * keys when requested, re-queue auto-connect peers, tear down the
 * connection object, and re-enable advertising for LE links if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if the connection was ever
	 * reported as connected.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-queue the peer for auto-connection where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the type for the check after */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2241
/* Handle the HCI Authentication Complete event: update the connection's
 * auth state, notify mgmt on failure, and continue connection setup —
 * typically by requesting encryption for SSP links or by confirming the
 * connection to the upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires the link to be encrypted before it
			 * can be reported connected.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* A pending encryption request was waiting on this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2307
/* Handle the HCI Remote Name Request Complete event: feed the (possibly
 * failed) name result into the pending-name machinery and, if the
 * associated connection still needs it, request authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* A failed lookup is reported with a NULL name */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2346
/* Handle the HCI Encryption Change event: update the connection's
 * encryption-related flags, enforce Secure Connections Only policy, and
 * notify the upper layers of the result.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* ev->encrypt == 0x02 means AES-CCM on BR/EDR;
			 * LE links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2412
2413 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2414 struct sk_buff *skb)
2415 {
2416 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2417 struct hci_conn *conn;
2418
2419 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2420
2421 hci_dev_lock(hdev);
2422
2423 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2424 if (conn) {
2425 if (!ev->status)
2426 set_bit(HCI_CONN_SECURE, &conn->flags);
2427
2428 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2429
2430 hci_key_change_cfm(conn, ev->status);
2431 }
2432
2433 hci_dev_unlock(hdev);
2434 }
2435
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores remote feature page 0 and continues connection setup: reads
 * the extended feature page when both sides are SSP capable, otherwise
 * resolves the remote name or announces the connection to mgmt, and
 * finally completes the connection if no outgoing authentication is
 * required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Remaining steps only apply while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP capable: fetch extended feature page 1 as well */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Not yet announced to mgmt: resolve the remote name first */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2485
/* Handle the HCI Command Complete event.
 *
 * Dispatches to the per-opcode hci_cc_* handler, cancels the command
 * timeout, completes any request waiting on this opcode and, when the
 * controller signals that it can accept more commands, kicks the
 * command queue work.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Leave only the return parameters for the hci_cc_* handlers */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions are not tied to a queued command */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept further commands */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2765
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the per-opcode hci_cs_* handler, cancels
 * the command timeout, completes a pending request when appropriate
 * and kicks the command queue if the controller can take more
 * commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP status events are not tied to a queued command */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent command
	 * is not waiting for a further event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can accept further commands */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2858
2859 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2860 {
2861 struct hci_ev_role_change *ev = (void *) skb->data;
2862 struct hci_conn *conn;
2863
2864 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2865
2866 hci_dev_lock(hdev);
2867
2868 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2869 if (conn) {
2870 if (!ev->status) {
2871 if (ev->role)
2872 clear_bit(HCI_CONN_MASTER, &conn->flags);
2873 else
2874 set_bit(HCI_CONN_MASTER, &conn->flags);
2875 }
2876
2877 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2878
2879 hci_role_switch_cfm(conn, ev->status, ev->role);
2880 }
2881
2882 hci_dev_unlock(hdev);
2883 }
2884
/* Handle the HCI Number Of Completed Packets event.
 *
 * Returns transmit credits for each reported connection handle and
 * reschedules the TX work.  Only valid when the controller uses
 * packet-based flow control.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the event really carries num_hndl handle entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the per-link-type counter, clamped to the
		 * controller-reported maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without dedicated LE buffers, LE traffic
			 * shares the ACL credit pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2950
2951 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2952 __u16 handle)
2953 {
2954 struct hci_chan *chan;
2955
2956 switch (hdev->dev_type) {
2957 case HCI_BREDR:
2958 return hci_conn_hash_lookup_handle(hdev, handle);
2959 case HCI_AMP:
2960 chan = hci_chan_lookup_handle(hdev, handle);
2961 if (chan)
2962 return chan->conn;
2963 break;
2964 default:
2965 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2966 break;
2967 }
2968
2969 return NULL;
2970 }
2971
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Returns block credits for each reported handle and reschedules the
 * TX work.  Only valid when the controller uses block-based flow
 * control.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the event really carries num_hndl handle entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles map to logical channels, not connections */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credit the shared block pool, clamped to the
			 * controller-reported maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3021
3022 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3023 {
3024 struct hci_ev_mode_change *ev = (void *) skb->data;
3025 struct hci_conn *conn;
3026
3027 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3028
3029 hci_dev_lock(hdev);
3030
3031 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3032 if (conn) {
3033 conn->mode = ev->mode;
3034
3035 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3036 &conn->flags)) {
3037 if (conn->mode == HCI_CM_ACTIVE)
3038 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3039 else
3040 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3041 }
3042
3043 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3044 hci_sco_setup(conn, ev->status);
3045 }
3046
3047 hci_dev_unlock(hdev);
3048 }
3049
/* Handle the HCI PIN Code Request event.
 *
 * Extends the disconnect timeout while pairing is in progress,
 * auto-rejects the request when the adapter is not pairable, and
 * otherwise forwards the request to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Give the pairing procedure more time before idle disconnect */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security level asks user space for a secure PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3086
/* Handle the HCI Link Key Request event.
 *
 * Looks up the stored link key for the peer and replies with it,
 * unless the key's strength is insufficient for the pending security
 * requirements — then a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Unauthenticated keys cannot satisfy a pending MITM
		 * requirement (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs are too weak for the
		 * high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3145
/* Handle the HCI Link Key Notification event.
 *
 * Stores the new link key, notifies user space via mgmt, and decides
 * whether the key should survive (debug keys and non-persistent keys
 * are flushed).
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* Changed-combination events keep the previous key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3199
3200 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3201 {
3202 struct hci_ev_clock_offset *ev = (void *) skb->data;
3203 struct hci_conn *conn;
3204
3205 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3206
3207 hci_dev_lock(hdev);
3208
3209 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3210 if (conn && !ev->status) {
3211 struct inquiry_entry *ie;
3212
3213 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3214 if (ie) {
3215 ie->data.clock_offset = ev->clock_offset;
3216 ie->timestamp = jiffies;
3217 }
3218 }
3219
3220 hci_dev_unlock(hdev);
3221 }
3222
3223 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3224 {
3225 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3226 struct hci_conn *conn;
3227
3228 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3229
3230 hci_dev_lock(hdev);
3231
3232 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3233 if (conn && !ev->status)
3234 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3235
3236 hci_dev_unlock(hdev);
3237 }
3238
3239 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3240 {
3241 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3242 struct inquiry_entry *ie;
3243
3244 BT_DBG("%s", hdev->name);
3245
3246 hci_dev_lock(hdev);
3247
3248 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3249 if (ie) {
3250 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3251 ie->timestamp = jiffies;
3252 }
3253
3254 hci_dev_unlock(hdev);
3255 }
3256
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with and without the page scan
 * mode field); the record size is used to tell them apart.  Each
 * response updates the inquiry cache and is reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size identifies the variant carrying pscan_mode */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3320
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested remote feature page, derives the SSP/SC state
 * from host feature page 1, and continues connection setup (name
 * resolution, mgmt notification, connection completion).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Remaining steps only apply while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Not yet announced to mgmt: resolve the remote name first */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3386
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Finishes eSCO/SCO link establishment.  A set of well-known error
 * codes triggers a retry of an outgoing link with a downgraded packet
 * type; any other failure closes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* Some controllers complete an eSCO attempt as SCO;
		 * fall back to the pending eSCO connection.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		/* Retry outgoing links with a less demanding packet type */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3444
3445 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3446 {
3447 size_t parsed = 0;
3448
3449 while (parsed < eir_len) {
3450 u8 field_len = eir[0];
3451
3452 if (field_len == 0)
3453 return parsed;
3454
3455 parsed += field_len + 1;
3456 eir += field_len + 1;
3457 }
3458
3459 return eir_len;
3460 }
3461
/* Handle the HCI Extended Inquiry Result event.
 *
 * Updates the inquiry cache from each response and reports the device
 * (including its EIR data) to mgmt.  A device whose EIR already
 * contains a complete name needs no separate name resolution.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* EIR with a complete name makes a name request redundant */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3511
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * For LE links only: promotes the pending security level on success,
 * disconnects an established link on failure, and completes pending
 * connect/auth callbacks.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Key refresh failed on an established link: disconnect it */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3561
3562 static u8 hci_get_auth_req(struct hci_conn *conn)
3563 {
3564 /* If remote requests no-bonding follow that lead */
3565 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3566 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3567 return conn->remote_auth | (conn->auth_type & 0x01);
3568
3569 /* If both remote and local have enough IO capabilities, require
3570 * MITM protection
3571 */
3572 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3573 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3574 return conn->remote_auth | 0x01;
3575
3576 /* No MITM protection possible so ignore remote requirement */
3577 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3578 }
3579
/* Handle the IO Capability Request event.
 *
 * Sent by the controller during Secure Simple Pairing. If pairing is
 * allowed, reply with our IO capability, OOB data availability and the
 * authentication requirements; otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Pairing is acceptable when we are pairable or when the remote
	 * asked for a non-bonding (dedicated) pairing.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when stored data exists and it
		 * is usable for this pairing direction.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3648
3649 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3650 {
3651 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3652 struct hci_conn *conn;
3653
3654 BT_DBG("%s", hdev->name);
3655
3656 hci_dev_lock(hdev);
3657
3658 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3659 if (!conn)
3660 goto unlock;
3661
3662 conn->remote_cap = ev->capability;
3663 conn->remote_auth = ev->authentication;
3664 if (ev->oob_data)
3665 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3666
3667 unlock:
3668 hci_dev_unlock(hdev);
3669 }
3670
/* Handle the User Confirmation Request event.
 *
 * Decides whether a numeric-comparison pairing can be auto-accepted,
 * must be auto-rejected, or needs to be forwarded to user space via
 * the management interface.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept via a delayed work so
		 * the remote side has a chance to abort first.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3740
3741 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3742 struct sk_buff *skb)
3743 {
3744 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3745
3746 BT_DBG("%s", hdev->name);
3747
3748 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3749 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3750 }
3751
3752 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3753 struct sk_buff *skb)
3754 {
3755 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3756 struct hci_conn *conn;
3757
3758 BT_DBG("%s", hdev->name);
3759
3760 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3761 if (!conn)
3762 return;
3763
3764 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3765 conn->passkey_entered = 0;
3766
3767 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3768 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3769 conn->dst_type, conn->passkey_notify,
3770 conn->passkey_entered);
3771 }
3772
3773 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3774 {
3775 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3776 struct hci_conn *conn;
3777
3778 BT_DBG("%s", hdev->name);
3779
3780 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3781 if (!conn)
3782 return;
3783
3784 switch (ev->type) {
3785 case HCI_KEYPRESS_STARTED:
3786 conn->passkey_entered = 0;
3787 return;
3788
3789 case HCI_KEYPRESS_ENTERED:
3790 conn->passkey_entered++;
3791 break;
3792
3793 case HCI_KEYPRESS_ERASED:
3794 conn->passkey_entered--;
3795 break;
3796
3797 case HCI_KEYPRESS_CLEARED:
3798 conn->passkey_entered = 0;
3799 break;
3800
3801 case HCI_KEYPRESS_COMPLETED:
3802 return;
3803 }
3804
3805 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3806 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3807 conn->dst_type, conn->passkey_notify,
3808 conn->passkey_entered);
3809 }
3810
3811 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3812 struct sk_buff *skb)
3813 {
3814 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3815 struct hci_conn *conn;
3816
3817 BT_DBG("%s", hdev->name);
3818
3819 hci_dev_lock(hdev);
3820
3821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3822 if (!conn)
3823 goto unlock;
3824
3825 /* To avoid duplicate auth_failed events to user space we check
3826 * the HCI_CONN_AUTH_PEND flag which will be set if we
3827 * initiated the authentication. A traditional auth_complete
3828 * event gets always produced as initiator and is also mapped to
3829 * the mgmt_auth_failed event */
3830 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3831 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3832 ev->status);
3833
3834 hci_conn_drop(conn);
3835
3836 unlock:
3837 hci_dev_unlock(hdev);
3838 }
3839
3840 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3841 struct sk_buff *skb)
3842 {
3843 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3844 struct inquiry_entry *ie;
3845 struct hci_conn *conn;
3846
3847 BT_DBG("%s", hdev->name);
3848
3849 hci_dev_lock(hdev);
3850
3851 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3852 if (conn)
3853 memcpy(conn->features[1], ev->features, 8);
3854
3855 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3856 if (ie)
3857 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3858
3859 hci_dev_unlock(hdev);
3860 }
3861
/* Handle the Remote OOB Data Request event.
 *
 * Replies with the locally stored out-of-band pairing data for the
 * remote device. When Secure Connections is enabled both the P-192
 * and P-256 values are sent; otherwise only the P-192 values. Without
 * stored data a negative reply is sent instead.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3912
/* Handle the AMP Physical Link Complete event.
 *
 * On success the new AMP physical link is marked connected, inherits
 * the peer address of its associated BR/EDR link, and the AMP manager
 * is notified so channel setup can continue. On failure the stale
 * connection object is removed.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* NOTE(review): assumes hcon->amp_mgr and its l2cap_conn are
	 * valid here; they are expected to have been set up by the
	 * physical link creation path -- confirm against callers.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Take the standard reference with a disconnect timeout so an
	 * unused link is eventually cleaned up.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3951
/* Handle the AMP Logical Link Complete event.
 *
 * Creates the hci_chan for the newly established logical link and, if
 * an L2CAP channel is waiting on the AMP manager, confirms the logical
 * link to L2CAP so the channel creation/move can proceed.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Use the controller's block MTU for the L2CAP conn */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3989
3990 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3991 struct sk_buff *skb)
3992 {
3993 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3994 struct hci_chan *hchan;
3995
3996 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3997 le16_to_cpu(ev->handle), ev->status);
3998
3999 if (ev->status)
4000 return;
4001
4002 hci_dev_lock(hdev);
4003
4004 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4005 if (!hchan)
4006 goto unlock;
4007
4008 amp_destroy_logical_link(hchan, ev->reason);
4009
4010 unlock:
4011 hci_dev_unlock(hdev);
4012 }
4013
4014 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4015 struct sk_buff *skb)
4016 {
4017 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4018 struct hci_conn *hcon;
4019
4020 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4021
4022 if (ev->status)
4023 return;
4024
4025 hci_dev_lock(hdev);
4026
4027 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4028 if (hcon) {
4029 hcon->state = BT_CLOSED;
4030 hci_conn_del(hcon);
4031 }
4032
4033 hci_dev_unlock(hdev);
4034 }
4035
/* Handle the LE Connection Complete event.
 *
 * Finds or creates the hci_conn for the new link, reconstructs the
 * initiator/responder addresses (needed later for SMP), resolves the
 * peer's identity address via a stored IRK, and on success marks the
 * connection up and notifies mgmt and the upper protocols.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt is tracked as a BT_CONNECT LE conn */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			set_bit(HCI_CONN_MASTER, &conn->flags);
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The attempt succeeded; stop the connect timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Report the connection to mgmt only once */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Remember the parameters the controller chose */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected; drop it from the pending list */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
4148
4149 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4150 struct sk_buff *skb)
4151 {
4152 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4153 struct hci_conn *conn;
4154
4155 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4156
4157 if (ev->status)
4158 return;
4159
4160 hci_dev_lock(hdev);
4161
4162 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4163 if (conn) {
4164 conn->le_conn_interval = le16_to_cpu(ev->interval);
4165 conn->le_conn_latency = le16_to_cpu(ev->latency);
4166 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4167 }
4168
4169 hci_dev_unlock(hdev);
4170 }
4171
/* Check whether an advertising device is on the pending-connections
 * list and, if so, initiate an LE connection to it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type)
{
	struct hci_conn *conn;
	struct smp_irk *irk;

	/* If this is a resolvable address, we should resolve it and then
	 * update address and address type variables.
	 */
	irk = hci_get_irk(hdev, addr, addr_type);
	if (irk) {
		addr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
		return;

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_AT_NO_BONDING);
	if (!IS_ERR(conn))
		return;

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect_le() returns -EBUSY it means there is
		 * already an LE connection attempt going on. Since
		 * controllers don't support more than one connection
		 * attempt at the time, we don't consider this an error
		 * case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4208
/* Process a single LE advertising report.
 *
 * During passive scanning this only feeds the pending-connection
 * logic. During active discovery it merges ADV_IND/ADV_SCAN_IND
 * reports with their subsequent SCAN_RSP (using the discovery state
 * as a one-entry cache) before emitting mgmt Device Found events.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;
	u32 flags;

	/* Passive scanning shouldn't trigger any device found events */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND)
			check_pending_le_conn(hdev, bdaddr, bdaddr_type);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4307
4308 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4309 {
4310 u8 num_reports = skb->data[0];
4311 void *ptr = &skb->data[1];
4312
4313 hci_dev_lock(hdev);
4314
4315 while (num_reports--) {
4316 struct hci_ev_le_advertising_info *ev = ptr;
4317 s8 rssi;
4318
4319 rssi = ev->data[ev->length];
4320 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4321 ev->bdaddr_type, rssi, ev->data, ev->length);
4322
4323 ptr += sizeof(*ev) + ev->length + 1;
4324 }
4325
4326 hci_dev_unlock(hdev);
4327 }
4328
/* Handle the LE Long Term Key Request event.
 *
 * Looks up a stored LTK (or pairing STK) matching the request and
 * replies with it, updating the connection's pending security level
 * and encryption key size. Without a matching key a negative reply
 * is sent instead.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated key grants a higher security level */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian, so copy it directly */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4384
4385 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4386 u8 reason)
4387 {
4388 struct hci_cp_le_conn_param_req_neg_reply cp;
4389
4390 cp.handle = cpu_to_le16(handle);
4391 cp.reason = reason;
4392
4393 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4394 &cp);
4395 }
4396
/* Handle the LE Remote Connection Parameter Request event.
 *
 * Validates the parameters proposed by the remote device, rejecting
 * requests for unknown connections or out-of-range values. When we
 * are master the proposed parameters are also reported to the
 * management interface before the accepting reply is sent.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (test_bit(HCI_CONN_MASTER, &hcon->flags))
		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, min, max,
				    latency, timeout);

	/* The event fields are already little-endian; copy them over */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4434
4435 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4436 {
4437 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4438
4439 skb_pull(skb, sizeof(*le_ev));
4440
4441 switch (le_ev->subevent) {
4442 case HCI_EV_LE_CONN_COMPLETE:
4443 hci_le_conn_complete_evt(hdev, skb);
4444 break;
4445
4446 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4447 hci_le_conn_update_complete_evt(hdev, skb);
4448 break;
4449
4450 case HCI_EV_LE_ADVERTISING_REPORT:
4451 hci_le_adv_report_evt(hdev, skb);
4452 break;
4453
4454 case HCI_EV_LE_LTK_REQ:
4455 hci_le_ltk_request_evt(hdev, skb);
4456 break;
4457
4458 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4459 hci_le_remote_conn_param_req_evt(hdev, skb);
4460 break;
4461
4462 default:
4463 break;
4464 }
4465 }
4466
4467 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4468 {
4469 struct hci_ev_channel_selected *ev = (void *) skb->data;
4470 struct hci_conn *hcon;
4471
4472 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4473
4474 skb_pull(skb, sizeof(*ev));
4475
4476 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4477 if (!hcon)
4478 return;
4479
4480 amp_read_loc_assoc_final_data(hdev, hcon);
4481 }
4482
/* Main HCI event dispatcher.
 *
 * Called with an skb containing a complete HCI event packet. Keeps a
 * copy of the event for a pending synchronous request if one is
 * waiting, completes the matching sent command if this event is the
 * one it was waiting for, and then dispatches to the per-event
 * handler. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for this event, mark the
	 * request as complete before running the handler.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}