git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/hci_event.c
Bluetooth: Read number of supported IAC on controller setup
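The commit shown here adds hci_cc_read_num_supported_iac(), the Command Complete handler that records how many Inquiry Access Codes (IACs) the controller supports. As a minimal sketch, the return-parameter layout the handler assumes looks roughly like the following (the actual struct is declared in include/net/bluetooth/hci.h and is not part of this file):

	struct hci_rp_read_num_supported_iac {
		__u8	status;		/* HCI command status, 0x00 on success */
		__u8	num_iac;	/* number of IACs the controller supports */
	} __packed;

On success the handler simply copies num_iac into hdev->num_iac; see hci_cc_read_num_supported_iac() further down in the file.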
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52 }
53
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 struct sk_buff *skb)
82 {
83 BT_DBG("%s", hdev->name);
84 }
85
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107 }
108
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126 }
127
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150 }
151
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154 {
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167 {
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179 }
180
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 __u8 status = *((__u8 *) skb->data);
184
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187 clear_bit(HCI_RESET, &hdev->flags);
188
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
198 }
199
200 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
201 {
202 __u8 status = *((__u8 *) skb->data);
203 void *sent;
204
205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
206
207 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
208 if (!sent)
209 return;
210
211 hci_dev_lock(hdev);
212
213 if (test_bit(HCI_MGMT, &hdev->dev_flags))
214 mgmt_set_local_name_complete(hdev, sent, status);
215 else if (!status)
216 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
217
218 hci_dev_unlock(hdev);
219 }
220
221 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
222 {
223 struct hci_rp_read_local_name *rp = (void *) skb->data;
224
225 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
226
227 if (rp->status)
228 return;
229
230 if (test_bit(HCI_SETUP, &hdev->dev_flags))
231 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
232 }
233
234 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
235 {
236 __u8 status = *((__u8 *) skb->data);
237 void *sent;
238
239 BT_DBG("%s status 0x%2.2x", hdev->name, status);
240
241 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
242 if (!sent)
243 return;
244
245 if (!status) {
246 __u8 param = *((__u8 *) sent);
247
248 if (param == AUTH_ENABLED)
249 set_bit(HCI_AUTH, &hdev->flags);
250 else
251 clear_bit(HCI_AUTH, &hdev->flags);
252 }
253
254 if (test_bit(HCI_MGMT, &hdev->dev_flags))
255 mgmt_auth_enable_complete(hdev, status);
256 }
257
258 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
259 {
260 __u8 status = *((__u8 *) skb->data);
261 void *sent;
262
263 BT_DBG("%s status 0x%2.2x", hdev->name, status);
264
265 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
266 if (!sent)
267 return;
268
269 if (!status) {
270 __u8 param = *((__u8 *) sent);
271
272 if (param)
273 set_bit(HCI_ENCRYPT, &hdev->flags);
274 else
275 clear_bit(HCI_ENCRYPT, &hdev->flags);
276 }
277 }
278
279 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
280 {
281 __u8 param, status = *((__u8 *) skb->data);
282 int old_pscan, old_iscan;
283 void *sent;
284
285 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286
287 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
288 if (!sent)
289 return;
290
291 param = *((__u8 *) sent);
292
293 hci_dev_lock(hdev);
294
295 if (status) {
296 mgmt_write_scan_failed(hdev, param, status);
297 hdev->discov_timeout = 0;
298 goto done;
299 }
300
301 /* We need to ensure that we set this back on if someone changed
302 * the scan mode through a raw HCI socket.
303 */
304 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
305
306 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
307 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
308
309 if (param & SCAN_INQUIRY) {
310 set_bit(HCI_ISCAN, &hdev->flags);
311 if (!old_iscan)
312 mgmt_discoverable(hdev, 1);
313 if (hdev->discov_timeout > 0) {
314 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
315 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
316 to);
317 }
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328 done:
329 hci_dev_unlock(hdev);
330 }
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367 }
368
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394 {
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420
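/* Inquiry Access Codes (IACs) select which inquiry requests the controller
 * answers during inquiry scan: the General IAC (GIAC) is used for general
 * discovery and the Limited IAC (LIAC) for limited discoverable mode. The
 * reply handled below reports how many IACs the controller can listen for
 * simultaneously.
 */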
421 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct sk_buff *skb)
423 {
424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->num_iac = rp->num_iac;
432
433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434 }
435
436 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 {
438 __u8 status = *((__u8 *) skb->data);
439 struct hci_cp_write_ssp_mode *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 if (!sent)
445 return;
446
447 if (!status) {
448 if (sent->mode)
449 hdev->features[1][0] |= LMP_HOST_SSP;
450 else
451 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 }
453
454 if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 else if (!status) {
457 if (sent->mode)
458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 else
460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 }
462 }
463
464 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
465 {
466 struct hci_rp_read_local_version *rp = (void *) skb->data;
467
468 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
469
470 if (rp->status)
471 return;
472
473 hdev->hci_ver = rp->hci_ver;
474 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
475 hdev->lmp_ver = rp->lmp_ver;
476 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
477 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
478
479 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
480 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
481 }
482
483 static void hci_cc_read_local_commands(struct hci_dev *hdev,
484 struct sk_buff *skb)
485 {
486 struct hci_rp_read_local_commands *rp = (void *) skb->data;
487
488 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
489
490 if (!rp->status)
491 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
492 }
493
494 static void hci_cc_read_local_features(struct hci_dev *hdev,
495 struct sk_buff *skb)
496 {
497 struct hci_rp_read_local_features *rp = (void *) skb->data;
498
499 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
500
501 if (rp->status)
502 return;
503
504 memcpy(hdev->features, rp->features, 8);
505
506 /* Adjust default settings according to features
507 * supported by device. */
508
509 if (hdev->features[0][0] & LMP_3SLOT)
510 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
511
512 if (hdev->features[0][0] & LMP_5SLOT)
513 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
514
515 if (hdev->features[0][1] & LMP_HV2) {
516 hdev->pkt_type |= (HCI_HV2);
517 hdev->esco_type |= (ESCO_HV2);
518 }
519
520 if (hdev->features[0][1] & LMP_HV3) {
521 hdev->pkt_type |= (HCI_HV3);
522 hdev->esco_type |= (ESCO_HV3);
523 }
524
525 if (lmp_esco_capable(hdev))
526 hdev->esco_type |= (ESCO_EV3);
527
528 if (hdev->features[0][4] & LMP_EV4)
529 hdev->esco_type |= (ESCO_EV4);
530
531 if (hdev->features[0][4] & LMP_EV5)
532 hdev->esco_type |= (ESCO_EV5);
533
534 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
535 hdev->esco_type |= (ESCO_2EV3);
536
537 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
538 hdev->esco_type |= (ESCO_3EV3);
539
540 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
541 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
542
543 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
544 hdev->features[0][0], hdev->features[0][1],
545 hdev->features[0][2], hdev->features[0][3],
546 hdev->features[0][4], hdev->features[0][5],
547 hdev->features[0][6], hdev->features[0][7]);
548 }
549
550 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
551 struct sk_buff *skb)
552 {
553 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
554
555 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
556
557 if (rp->status)
558 return;
559
560 hdev->max_page = rp->max_page;
561
562 if (rp->page < HCI_MAX_PAGES)
563 memcpy(hdev->features[rp->page], rp->features, 8);
564 }
565
566 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
567 struct sk_buff *skb)
568 {
569 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
570
571 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572
573 if (!rp->status)
574 hdev->flow_ctl_mode = rp->mode;
575 }
576
577 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
578 {
579 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
580
581 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
582
583 if (rp->status)
584 return;
585
586 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
587 hdev->sco_mtu = rp->sco_mtu;
588 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
589 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
590
591 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
592 hdev->sco_mtu = 64;
593 hdev->sco_pkts = 8;
594 }
595
596 hdev->acl_cnt = hdev->acl_pkts;
597 hdev->sco_cnt = hdev->sco_pkts;
598
599 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
600 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
601 }
602
603 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
604 {
605 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
606
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609 if (!rp->status)
610 bacpy(&hdev->bdaddr, &rp->bdaddr);
611 }
612
613 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
614 struct sk_buff *skb)
615 {
616 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
617
618 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
619
620 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
621 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
622 hdev->page_scan_window = __le16_to_cpu(rp->window);
623 }
624 }
625
626 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
627 struct sk_buff *skb)
628 {
629 u8 status = *((u8 *) skb->data);
630 struct hci_cp_write_page_scan_activity *sent;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, status);
633
634 if (status)
635 return;
636
637 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
638 if (!sent)
639 return;
640
641 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
642 hdev->page_scan_window = __le16_to_cpu(sent->window);
643 }
644
645 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
653 hdev->page_scan_type = rp->type;
654 }
655
656 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
657 struct sk_buff *skb)
658 {
659 u8 status = *((u8 *) skb->data);
660 u8 *type;
661
662 BT_DBG("%s status 0x%2.2x", hdev->name, status);
663
664 if (status)
665 return;
666
667 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
668 if (type)
669 hdev->page_scan_type = *type;
670 }
671
672 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
673 struct sk_buff *skb)
674 {
675 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
676
677 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678
679 if (rp->status)
680 return;
681
682 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
683 hdev->block_len = __le16_to_cpu(rp->block_len);
684 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
685
686 hdev->block_cnt = hdev->num_blocks;
687
688 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
689 hdev->block_cnt, hdev->block_len);
690 }
691
692 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
693 struct sk_buff *skb)
694 {
695 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
696
697 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
698
699 if (rp->status)
700 goto a2mp_rsp;
701
702 hdev->amp_status = rp->amp_status;
703 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
704 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
705 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
706 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
707 hdev->amp_type = rp->amp_type;
708 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
709 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
710 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
711 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
712
713 a2mp_rsp:
714 a2mp_send_getinfo_rsp(hdev);
715 }
716
717 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
718 struct sk_buff *skb)
719 {
720 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
721 struct amp_assoc *assoc = &hdev->loc_assoc;
722 size_t rem_len, frag_len;
723
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725
726 if (rp->status)
727 goto a2mp_rsp;
728
729 frag_len = skb->len - sizeof(*rp);
730 rem_len = __le16_to_cpu(rp->rem_len);
731
732 if (rem_len > frag_len) {
733 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
734
735 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
736 assoc->offset += frag_len;
737
738 /* Read other fragments */
739 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
740
741 return;
742 }
743
744 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
745 assoc->len = assoc->offset + rem_len;
746 assoc->offset = 0;
747
748 a2mp_rsp:
749 /* Send A2MP Rsp when all fragments are received */
750 a2mp_send_getampassoc_rsp(hdev, rp->status);
751 a2mp_send_create_phy_link_req(hdev, rp->status);
752 }
753
754 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
755 struct sk_buff *skb)
756 {
757 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
758
759 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760
761 if (!rp->status)
762 hdev->inq_tx_power = rp->tx_power;
763 }
764
765 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
766 {
767 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
768 struct hci_cp_pin_code_reply *cp;
769 struct hci_conn *conn;
770
771 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772
773 hci_dev_lock(hdev);
774
775 if (test_bit(HCI_MGMT, &hdev->dev_flags))
776 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
777
778 if (rp->status)
779 goto unlock;
780
781 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
782 if (!cp)
783 goto unlock;
784
785 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
786 if (conn)
787 conn->pin_length = cp->pin_len;
788
789 unlock:
790 hci_dev_unlock(hdev);
791 }
792
793 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
794 {
795 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
796
797 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798
799 hci_dev_lock(hdev);
800
801 if (test_bit(HCI_MGMT, &hdev->dev_flags))
802 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
803 rp->status);
804
805 hci_dev_unlock(hdev);
806 }
807
808 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
809 struct sk_buff *skb)
810 {
811 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
812
813 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
814
815 if (rp->status)
816 return;
817
818 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
819 hdev->le_pkts = rp->le_max_pkt;
820
821 hdev->le_cnt = hdev->le_pkts;
822
823 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
824 }
825
826 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
827 struct sk_buff *skb)
828 {
829 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
830
831 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832
833 if (!rp->status)
834 memcpy(hdev->le_features, rp->features, 8);
835 }
836
837 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
838 struct sk_buff *skb)
839 {
840 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
841
842 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
843
844 if (!rp->status)
845 hdev->adv_tx_power = rp->tx_power;
846 }
847
848 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
849 {
850 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
851
852 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
853
854 hci_dev_lock(hdev);
855
856 if (test_bit(HCI_MGMT, &hdev->dev_flags))
857 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
858 rp->status);
859
860 hci_dev_unlock(hdev);
861 }
862
863 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
864 struct sk_buff *skb)
865 {
866 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
867
868 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869
870 hci_dev_lock(hdev);
871
872 if (test_bit(HCI_MGMT, &hdev->dev_flags))
873 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
874 ACL_LINK, 0, rp->status);
875
876 hci_dev_unlock(hdev);
877 }
878
879 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
880 {
881 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
882
883 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
884
885 hci_dev_lock(hdev);
886
887 if (test_bit(HCI_MGMT, &hdev->dev_flags))
888 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
889 0, rp->status);
890
891 hci_dev_unlock(hdev);
892 }
893
894 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
895 struct sk_buff *skb)
896 {
897 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
898
899 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
900
901 hci_dev_lock(hdev);
902
903 if (test_bit(HCI_MGMT, &hdev->dev_flags))
904 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
905 ACL_LINK, 0, rp->status);
906
907 hci_dev_unlock(hdev);
908 }
909
910 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
911 struct sk_buff *skb)
912 {
913 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
914
915 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
916
917 hci_dev_lock(hdev);
918 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
919 rp->randomizer, rp->status);
920 hci_dev_unlock(hdev);
921 }
922
923 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
924 {
925 __u8 *sent, status = *((__u8 *) skb->data);
926
927 BT_DBG("%s status 0x%2.2x", hdev->name, status);
928
929 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
930 if (!sent)
931 return;
932
933 hci_dev_lock(hdev);
934
935 if (!status) {
936 if (*sent)
937 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
938 else
939 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
940 }
941
942 if (*sent && !test_bit(HCI_INIT, &hdev->flags)) {
943 struct hci_request req;
944
945 hci_req_init(&req, hdev);
946 hci_update_ad(&req);
947 hci_req_run(&req, NULL);
948 }
949
950 hci_dev_unlock(hdev);
951 }
952
953 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
954 struct sk_buff *skb)
955 {
956 struct hci_cp_le_set_scan_enable *cp;
957 __u8 status = *((__u8 *) skb->data);
958
959 BT_DBG("%s status 0x%2.2x", hdev->name, status);
960
961 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
962 if (!cp)
963 return;
964
965 if (status)
966 return;
967
968 switch (cp->enable) {
969 case LE_SCAN_ENABLE:
970 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
971 break;
972
973 case LE_SCAN_DISABLE:
974 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
975 break;
976
977 default:
978 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
979 break;
980 }
981 }
982
983 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
984 struct sk_buff *skb)
985 {
986 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
987
988 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
989
990 if (!rp->status)
991 hdev->le_white_list_size = rp->size;
992 }
993
994 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
995 struct sk_buff *skb)
996 {
997 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
998
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1000
1001 if (!rp->status)
1002 memcpy(hdev->le_states, rp->le_states, 8);
1003 }
1004
1005 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1006 struct sk_buff *skb)
1007 {
1008 struct hci_cp_write_le_host_supported *sent;
1009 __u8 status = *((__u8 *) skb->data);
1010
1011 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1012
1013 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1014 if (!sent)
1015 return;
1016
1017 if (!status) {
1018 if (sent->le) {
1019 hdev->features[1][0] |= LMP_HOST_LE;
1020 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1021 } else {
1022 hdev->features[1][0] &= ~LMP_HOST_LE;
1023 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1024 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1025 }
1026
1027 if (sent->simul)
1028 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1029 else
1030 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1031 }
1032 }
1033
1034 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1035 struct sk_buff *skb)
1036 {
1037 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1038
1039 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1040 hdev->name, rp->status, rp->phy_handle);
1041
1042 if (rp->status)
1043 return;
1044
1045 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1046 }
1047
1048 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1049 {
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1051
1052 if (status) {
1053 hci_conn_check_pending(hdev);
1054 return;
1055 }
1056
1057 set_bit(HCI_INQUIRY, &hdev->flags);
1058 }
1059
1060 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1061 {
1062 struct hci_cp_create_conn *cp;
1063 struct hci_conn *conn;
1064
1065 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1066
1067 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1068 if (!cp)
1069 return;
1070
1071 hci_dev_lock(hdev);
1072
1073 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1074
1075 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1076
1077 if (status) {
1078 if (conn && conn->state == BT_CONNECT) {
1079 if (status != 0x0c || conn->attempt > 2) {
1080 conn->state = BT_CLOSED;
1081 hci_proto_connect_cfm(conn, status);
1082 hci_conn_del(conn);
1083 } else
1084 conn->state = BT_CONNECT2;
1085 }
1086 } else {
1087 if (!conn) {
1088 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1089 if (conn) {
1090 conn->out = true;
1091 conn->link_mode |= HCI_LM_MASTER;
1092 } else
1093 BT_ERR("No memory for new connection");
1094 }
1095 }
1096
1097 hci_dev_unlock(hdev);
1098 }
1099
1100 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1101 {
1102 struct hci_cp_add_sco *cp;
1103 struct hci_conn *acl, *sco;
1104 __u16 handle;
1105
1106 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1107
1108 if (!status)
1109 return;
1110
1111 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1112 if (!cp)
1113 return;
1114
1115 handle = __le16_to_cpu(cp->handle);
1116
1117 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1118
1119 hci_dev_lock(hdev);
1120
1121 acl = hci_conn_hash_lookup_handle(hdev, handle);
1122 if (acl) {
1123 sco = acl->link;
1124 if (sco) {
1125 sco->state = BT_CLOSED;
1126
1127 hci_proto_connect_cfm(sco, status);
1128 hci_conn_del(sco);
1129 }
1130 }
1131
1132 hci_dev_unlock(hdev);
1133 }
1134
1135 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1136 {
1137 struct hci_cp_auth_requested *cp;
1138 struct hci_conn *conn;
1139
1140 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1141
1142 if (!status)
1143 return;
1144
1145 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1146 if (!cp)
1147 return;
1148
1149 hci_dev_lock(hdev);
1150
1151 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1152 if (conn) {
1153 if (conn->state == BT_CONFIG) {
1154 hci_proto_connect_cfm(conn, status);
1155 hci_conn_drop(conn);
1156 }
1157 }
1158
1159 hci_dev_unlock(hdev);
1160 }
1161
1162 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1163 {
1164 struct hci_cp_set_conn_encrypt *cp;
1165 struct hci_conn *conn;
1166
1167 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1168
1169 if (!status)
1170 return;
1171
1172 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1173 if (!cp)
1174 return;
1175
1176 hci_dev_lock(hdev);
1177
1178 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1179 if (conn) {
1180 if (conn->state == BT_CONFIG) {
1181 hci_proto_connect_cfm(conn, status);
1182 hci_conn_drop(conn);
1183 }
1184 }
1185
1186 hci_dev_unlock(hdev);
1187 }
1188
1189 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1190 struct hci_conn *conn)
1191 {
1192 if (conn->state != BT_CONFIG || !conn->out)
1193 return 0;
1194
1195 if (conn->pending_sec_level == BT_SECURITY_SDP)
1196 return 0;
1197
1198 /* Only request authentication for SSP connections or non-SSP
1199 * devices with sec_level HIGH or if MITM protection is requested */
1200 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1201 conn->pending_sec_level != BT_SECURITY_HIGH)
1202 return 0;
1203
1204 return 1;
1205 }
1206
1207 static int hci_resolve_name(struct hci_dev *hdev,
1208 struct inquiry_entry *e)
1209 {
1210 struct hci_cp_remote_name_req cp;
1211
1212 memset(&cp, 0, sizeof(cp));
1213
1214 bacpy(&cp.bdaddr, &e->data.bdaddr);
1215 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1216 cp.pscan_mode = e->data.pscan_mode;
1217 cp.clock_offset = e->data.clock_offset;
1218
1219 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1220 }
1221
1222 static bool hci_resolve_next_name(struct hci_dev *hdev)
1223 {
1224 struct discovery_state *discov = &hdev->discovery;
1225 struct inquiry_entry *e;
1226
1227 if (list_empty(&discov->resolve))
1228 return false;
1229
1230 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1231 if (!e)
1232 return false;
1233
1234 if (hci_resolve_name(hdev, e) == 0) {
1235 e->name_state = NAME_PENDING;
1236 return true;
1237 }
1238
1239 return false;
1240 }
1241
1242 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1243 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1244 {
1245 struct discovery_state *discov = &hdev->discovery;
1246 struct inquiry_entry *e;
1247
1248 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1249 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1250 name_len, conn->dev_class);
1251
1252 if (discov->state == DISCOVERY_STOPPED)
1253 return;
1254
1255 if (discov->state == DISCOVERY_STOPPING)
1256 goto discov_complete;
1257
1258 if (discov->state != DISCOVERY_RESOLVING)
1259 return;
1260
1261 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1262 /* If the device was not found in the list of devices whose names are
1263 * still pending, there is no need to continue resolving the next name,
1264 * as that will be done upon receiving another Remote Name Request
1265 * Complete event. */
1266 if (!e)
1267 return;
1268
1269 list_del(&e->list);
1270 if (name) {
1271 e->name_state = NAME_KNOWN;
1272 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1273 e->data.rssi, name, name_len);
1274 } else {
1275 e->name_state = NAME_NOT_KNOWN;
1276 }
1277
1278 if (hci_resolve_next_name(hdev))
1279 return;
1280
1281 discov_complete:
1282 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1283 }
1284
1285 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1286 {
1287 struct hci_cp_remote_name_req *cp;
1288 struct hci_conn *conn;
1289
1290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1291
1292 /* If successful, wait for the name req complete event before
1293 * checking whether authentication is needed */
1294 if (!status)
1295 return;
1296
1297 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1298 if (!cp)
1299 return;
1300
1301 hci_dev_lock(hdev);
1302
1303 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1304
1305 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1306 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1307
1308 if (!conn)
1309 goto unlock;
1310
1311 if (!hci_outgoing_auth_needed(hdev, conn))
1312 goto unlock;
1313
1314 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1315 struct hci_cp_auth_requested auth_cp;
1316
1317 auth_cp.handle = __cpu_to_le16(conn->handle);
1318 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1319 sizeof(auth_cp), &auth_cp);
1320 }
1321
1322 unlock:
1323 hci_dev_unlock(hdev);
1324 }
1325
1326 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1327 {
1328 struct hci_cp_read_remote_features *cp;
1329 struct hci_conn *conn;
1330
1331 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1332
1333 if (!status)
1334 return;
1335
1336 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1337 if (!cp)
1338 return;
1339
1340 hci_dev_lock(hdev);
1341
1342 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1343 if (conn) {
1344 if (conn->state == BT_CONFIG) {
1345 hci_proto_connect_cfm(conn, status);
1346 hci_conn_drop(conn);
1347 }
1348 }
1349
1350 hci_dev_unlock(hdev);
1351 }
1352
1353 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1354 {
1355 struct hci_cp_read_remote_ext_features *cp;
1356 struct hci_conn *conn;
1357
1358 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1359
1360 if (!status)
1361 return;
1362
1363 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1364 if (!cp)
1365 return;
1366
1367 hci_dev_lock(hdev);
1368
1369 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1370 if (conn) {
1371 if (conn->state == BT_CONFIG) {
1372 hci_proto_connect_cfm(conn, status);
1373 hci_conn_drop(conn);
1374 }
1375 }
1376
1377 hci_dev_unlock(hdev);
1378 }
1379
1380 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1381 {
1382 struct hci_cp_setup_sync_conn *cp;
1383 struct hci_conn *acl, *sco;
1384 __u16 handle;
1385
1386 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1387
1388 if (!status)
1389 return;
1390
1391 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1392 if (!cp)
1393 return;
1394
1395 handle = __le16_to_cpu(cp->handle);
1396
1397 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1398
1399 hci_dev_lock(hdev);
1400
1401 acl = hci_conn_hash_lookup_handle(hdev, handle);
1402 if (acl) {
1403 sco = acl->link;
1404 if (sco) {
1405 sco->state = BT_CLOSED;
1406
1407 hci_proto_connect_cfm(sco, status);
1408 hci_conn_del(sco);
1409 }
1410 }
1411
1412 hci_dev_unlock(hdev);
1413 }
1414
1415 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1416 {
1417 struct hci_cp_sniff_mode *cp;
1418 struct hci_conn *conn;
1419
1420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1421
1422 if (!status)
1423 return;
1424
1425 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1426 if (!cp)
1427 return;
1428
1429 hci_dev_lock(hdev);
1430
1431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1432 if (conn) {
1433 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1434
1435 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1436 hci_sco_setup(conn, status);
1437 }
1438
1439 hci_dev_unlock(hdev);
1440 }
1441
1442 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1443 {
1444 struct hci_cp_exit_sniff_mode *cp;
1445 struct hci_conn *conn;
1446
1447 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1448
1449 if (!status)
1450 return;
1451
1452 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1453 if (!cp)
1454 return;
1455
1456 hci_dev_lock(hdev);
1457
1458 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1459 if (conn) {
1460 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1461
1462 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1463 hci_sco_setup(conn, status);
1464 }
1465
1466 hci_dev_unlock(hdev);
1467 }
1468
1469 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1470 {
1471 struct hci_cp_disconnect *cp;
1472 struct hci_conn *conn;
1473
1474 if (!status)
1475 return;
1476
1477 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1478 if (!cp)
1479 return;
1480
1481 hci_dev_lock(hdev);
1482
1483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1484 if (conn)
1485 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1486 conn->dst_type, status);
1487
1488 hci_dev_unlock(hdev);
1489 }
1490
1491 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1492 {
1493 struct hci_cp_create_phy_link *cp;
1494
1495 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1496
1497 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1498 if (!cp)
1499 return;
1500
1501 hci_dev_lock(hdev);
1502
1503 if (status) {
1504 struct hci_conn *hcon;
1505
1506 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1507 if (hcon)
1508 hci_conn_del(hcon);
1509 } else {
1510 amp_write_remote_assoc(hdev, cp->phy_handle);
1511 }
1512
1513 hci_dev_unlock(hdev);
1514 }
1515
1516 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1517 {
1518 struct hci_cp_accept_phy_link *cp;
1519
1520 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1521
1522 if (status)
1523 return;
1524
1525 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1526 if (!cp)
1527 return;
1528
1529 amp_write_remote_assoc(hdev, cp->phy_handle);
1530 }
1531
1532 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1533 {
1534 __u8 status = *((__u8 *) skb->data);
1535 struct discovery_state *discov = &hdev->discovery;
1536 struct inquiry_entry *e;
1537
1538 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539
1540 hci_conn_check_pending(hdev);
1541
1542 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1543 return;
1544
1545 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1546 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1547
1548 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1549 return;
1550
1551 hci_dev_lock(hdev);
1552
1553 if (discov->state != DISCOVERY_FINDING)
1554 goto unlock;
1555
1556 if (list_empty(&discov->resolve)) {
1557 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1558 goto unlock;
1559 }
1560
1561 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1562 if (e && hci_resolve_name(hdev, e) == 0) {
1563 e->name_state = NAME_PENDING;
1564 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1565 } else {
1566 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1567 }
1568
1569 unlock:
1570 hci_dev_unlock(hdev);
1571 }
1572
1573 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1574 {
1575 struct inquiry_data data;
1576 struct inquiry_info *info = (void *) (skb->data + 1);
1577 int num_rsp = *((__u8 *) skb->data);
1578
1579 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1580
1581 if (!num_rsp)
1582 return;
1583
1584 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1585 return;
1586
1587 hci_dev_lock(hdev);
1588
1589 for (; num_rsp; num_rsp--, info++) {
1590 bool name_known, ssp;
1591
1592 bacpy(&data.bdaddr, &info->bdaddr);
1593 data.pscan_rep_mode = info->pscan_rep_mode;
1594 data.pscan_period_mode = info->pscan_period_mode;
1595 data.pscan_mode = info->pscan_mode;
1596 memcpy(data.dev_class, info->dev_class, 3);
1597 data.clock_offset = info->clock_offset;
1598 data.rssi = 0x00;
1599 data.ssp_mode = 0x00;
1600
1601 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1602 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1603 info->dev_class, 0, !name_known, ssp, NULL,
1604 0);
1605 }
1606
1607 hci_dev_unlock(hdev);
1608 }
1609
1610 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1611 {
1612 struct hci_ev_conn_complete *ev = (void *) skb->data;
1613 struct hci_conn *conn;
1614
1615 BT_DBG("%s", hdev->name);
1616
1617 hci_dev_lock(hdev);
1618
1619 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1620 if (!conn) {
1621 if (ev->link_type != SCO_LINK)
1622 goto unlock;
1623
1624 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1625 if (!conn)
1626 goto unlock;
1627
1628 conn->type = SCO_LINK;
1629 }
1630
1631 if (!ev->status) {
1632 conn->handle = __le16_to_cpu(ev->handle);
1633
1634 if (conn->type == ACL_LINK) {
1635 conn->state = BT_CONFIG;
1636 hci_conn_hold(conn);
1637
1638 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1639 !hci_find_link_key(hdev, &ev->bdaddr))
1640 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1641 else
1642 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1643 } else
1644 conn->state = BT_CONNECTED;
1645
1646 hci_conn_add_sysfs(conn);
1647
1648 if (test_bit(HCI_AUTH, &hdev->flags))
1649 conn->link_mode |= HCI_LM_AUTH;
1650
1651 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1652 conn->link_mode |= HCI_LM_ENCRYPT;
1653
1654 /* Get remote features */
1655 if (conn->type == ACL_LINK) {
1656 struct hci_cp_read_remote_features cp;
1657 cp.handle = ev->handle;
1658 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1659 sizeof(cp), &cp);
1660 }
1661
1662 /* Set packet type for incoming connection */
1663 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1664 struct hci_cp_change_conn_ptype cp;
1665 cp.handle = ev->handle;
1666 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1667 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1668 &cp);
1669 }
1670 } else {
1671 conn->state = BT_CLOSED;
1672 if (conn->type == ACL_LINK)
1673 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1674 conn->dst_type, ev->status);
1675 }
1676
1677 if (conn->type == ACL_LINK)
1678 hci_sco_setup(conn, ev->status);
1679
1680 if (ev->status) {
1681 hci_proto_connect_cfm(conn, ev->status);
1682 hci_conn_del(conn);
1683 } else if (ev->link_type != ACL_LINK)
1684 hci_proto_connect_cfm(conn, ev->status);
1685
1686 unlock:
1687 hci_dev_unlock(hdev);
1688
1689 hci_conn_check_pending(hdev);
1690 }
1691
1692 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1693 {
1694 struct hci_ev_conn_request *ev = (void *) skb->data;
1695 int mask = hdev->link_mode;
1696 __u8 flags = 0;
1697
1698 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1699 ev->link_type);
1700
1701 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1702 &flags);
1703
1704 if ((mask & HCI_LM_ACCEPT) &&
1705 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1706 /* Connection accepted */
1707 struct inquiry_entry *ie;
1708 struct hci_conn *conn;
1709
1710 hci_dev_lock(hdev);
1711
1712 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1713 if (ie)
1714 memcpy(ie->data.dev_class, ev->dev_class, 3);
1715
1716 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1717 &ev->bdaddr);
1718 if (!conn) {
1719 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1720 if (!conn) {
1721 BT_ERR("No memory for new connection");
1722 hci_dev_unlock(hdev);
1723 return;
1724 }
1725 }
1726
1727 memcpy(conn->dev_class, ev->dev_class, 3);
1728
1729 hci_dev_unlock(hdev);
1730
1731 if (ev->link_type == ACL_LINK ||
1732 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1733 struct hci_cp_accept_conn_req cp;
1734 conn->state = BT_CONNECT;
1735
1736 bacpy(&cp.bdaddr, &ev->bdaddr);
1737
1738 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1739 cp.role = 0x00; /* Become master */
1740 else
1741 cp.role = 0x01; /* Remain slave */
1742
1743 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1744 &cp);
1745 } else if (!(flags & HCI_PROTO_DEFER)) {
1746 struct hci_cp_accept_sync_conn_req cp;
1747 conn->state = BT_CONNECT;
1748
1749 bacpy(&cp.bdaddr, &ev->bdaddr);
1750 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1751
1752 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1753 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1754 cp.max_latency = __constant_cpu_to_le16(0xffff);
1755 cp.content_format = cpu_to_le16(hdev->voice_setting);
1756 cp.retrans_effort = 0xff;
1757
1758 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1759 sizeof(cp), &cp);
1760 } else {
1761 conn->state = BT_CONNECT2;
1762 hci_proto_connect_cfm(conn, 0);
1763 }
1764 } else {
1765 /* Connection rejected */
1766 struct hci_cp_reject_conn_req cp;
1767
1768 bacpy(&cp.bdaddr, &ev->bdaddr);
1769 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1770 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1771 }
1772 }
1773
1774 static u8 hci_to_mgmt_reason(u8 err)
1775 {
1776 switch (err) {
1777 case HCI_ERROR_CONNECTION_TIMEOUT:
1778 return MGMT_DEV_DISCONN_TIMEOUT;
1779 case HCI_ERROR_REMOTE_USER_TERM:
1780 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1781 case HCI_ERROR_REMOTE_POWER_OFF:
1782 return MGMT_DEV_DISCONN_REMOTE;
1783 case HCI_ERROR_LOCAL_HOST_TERM:
1784 return MGMT_DEV_DISCONN_LOCAL_HOST;
1785 default:
1786 return MGMT_DEV_DISCONN_UNKNOWN;
1787 }
1788 }
1789
1790 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1791 {
1792 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1793 struct hci_conn *conn;
1794
1795 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1796
1797 hci_dev_lock(hdev);
1798
1799 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1800 if (!conn)
1801 goto unlock;
1802
1803 if (ev->status == 0)
1804 conn->state = BT_CLOSED;
1805
1806 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1807 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1808 if (ev->status) {
1809 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1810 conn->dst_type, ev->status);
1811 } else {
1812 u8 reason = hci_to_mgmt_reason(ev->reason);
1813
1814 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1815 conn->dst_type, reason);
1816 }
1817 }
1818
1819 if (ev->status == 0) {
1820 u8 type = conn->type;
1821
1822 if (type == ACL_LINK && conn->flush_key)
1823 hci_remove_link_key(hdev, &conn->dst);
1824 hci_proto_disconn_cfm(conn, ev->reason);
1825 hci_conn_del(conn);
1826
1827 /* Re-enable advertising if necessary, since it might
1828 * have been disabled by the connection. From the
1829 * HCI_LE_Set_Advertise_Enable command description in
1830 * the core specification (v4.0):
1831 * "The Controller shall continue advertising until the Host
1832 * issues an LE_Set_Advertise_Enable command with
1833 * Advertising_Enable set to 0x00 (Advertising is disabled)
1834 * or until a connection is created or until the Advertising
1835 * is timed out due to Directed Advertising."
1836 */
1837 if (type == LE_LINK)
1838 mgmt_reenable_advertising(hdev);
1839 }
1840
1841 unlock:
1842 hci_dev_unlock(hdev);
1843 }
1844
1845 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1846 {
1847 struct hci_ev_auth_complete *ev = (void *) skb->data;
1848 struct hci_conn *conn;
1849
1850 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1851
1852 hci_dev_lock(hdev);
1853
1854 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1855 if (!conn)
1856 goto unlock;
1857
1858 if (!ev->status) {
1859 if (!hci_conn_ssp_enabled(conn) &&
1860 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1861 BT_INFO("re-auth of legacy device is not possible.");
1862 } else {
1863 conn->link_mode |= HCI_LM_AUTH;
1864 conn->sec_level = conn->pending_sec_level;
1865 }
1866 } else {
1867 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1868 ev->status);
1869 }
1870
1871 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1872 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1873
1874 if (conn->state == BT_CONFIG) {
1875 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1876 struct hci_cp_set_conn_encrypt cp;
1877 cp.handle = ev->handle;
1878 cp.encrypt = 0x01;
1879 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1880 &cp);
1881 } else {
1882 conn->state = BT_CONNECTED;
1883 hci_proto_connect_cfm(conn, ev->status);
1884 hci_conn_drop(conn);
1885 }
1886 } else {
1887 hci_auth_cfm(conn, ev->status);
1888
1889 hci_conn_hold(conn);
1890 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1891 hci_conn_drop(conn);
1892 }
1893
1894 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1895 if (!ev->status) {
1896 struct hci_cp_set_conn_encrypt cp;
1897 cp.handle = ev->handle;
1898 cp.encrypt = 0x01;
1899 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1900 &cp);
1901 } else {
1902 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1903 hci_encrypt_cfm(conn, ev->status, 0x00);
1904 }
1905 }
1906
1907 unlock:
1908 hci_dev_unlock(hdev);
1909 }
1910
1911 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1912 {
1913 struct hci_ev_remote_name *ev = (void *) skb->data;
1914 struct hci_conn *conn;
1915
1916 BT_DBG("%s", hdev->name);
1917
1918 hci_conn_check_pending(hdev);
1919
1920 hci_dev_lock(hdev);
1921
1922 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1923
1924 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1925 goto check_auth;
1926
1927 if (ev->status == 0)
1928 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1929 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1930 else
1931 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1932
1933 check_auth:
1934 if (!conn)
1935 goto unlock;
1936
1937 if (!hci_outgoing_auth_needed(hdev, conn))
1938 goto unlock;
1939
1940 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1941 struct hci_cp_auth_requested cp;
1942 cp.handle = __cpu_to_le16(conn->handle);
1943 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1944 }
1945
1946 unlock:
1947 hci_dev_unlock(hdev);
1948 }
1949
1950 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1951 {
1952 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1953 struct hci_conn *conn;
1954
1955 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1956
1957 hci_dev_lock(hdev);
1958
1959 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1960 if (conn) {
1961 if (!ev->status) {
1962 if (ev->encrypt) {
1963 /* Encryption implies authentication */
1964 conn->link_mode |= HCI_LM_AUTH;
1965 conn->link_mode |= HCI_LM_ENCRYPT;
1966 conn->sec_level = conn->pending_sec_level;
1967 } else
1968 conn->link_mode &= ~HCI_LM_ENCRYPT;
1969 }
1970
1971 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1972
1973 if (ev->status && conn->state == BT_CONNECTED) {
1974 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1975 hci_conn_drop(conn);
1976 goto unlock;
1977 }
1978
1979 if (conn->state == BT_CONFIG) {
1980 if (!ev->status)
1981 conn->state = BT_CONNECTED;
1982
1983 hci_proto_connect_cfm(conn, ev->status);
1984 hci_conn_drop(conn);
1985 } else
1986 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1987 }
1988
1989 unlock:
1990 hci_dev_unlock(hdev);
1991 }
1992
1993 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1994 struct sk_buff *skb)
1995 {
1996 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1997 struct hci_conn *conn;
1998
1999 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2000
2001 hci_dev_lock(hdev);
2002
2003 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2004 if (conn) {
2005 if (!ev->status)
2006 conn->link_mode |= HCI_LM_SECURE;
2007
2008 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2009
2010 hci_key_change_cfm(conn, ev->status);
2011 }
2012
2013 hci_dev_unlock(hdev);
2014 }
2015
2016 static void hci_remote_features_evt(struct hci_dev *hdev,
2017 struct sk_buff *skb)
2018 {
2019 struct hci_ev_remote_features *ev = (void *) skb->data;
2020 struct hci_conn *conn;
2021
2022 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2023
2024 hci_dev_lock(hdev);
2025
2026 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2027 if (!conn)
2028 goto unlock;
2029
2030 if (!ev->status)
2031 memcpy(conn->features[0], ev->features, 8);
2032
2033 if (conn->state != BT_CONFIG)
2034 goto unlock;
2035
2036 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2037 struct hci_cp_read_remote_ext_features cp;
2038 cp.handle = ev->handle;
2039 cp.page = 0x01;
2040 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2041 sizeof(cp), &cp);
2042 goto unlock;
2043 }
2044
2045 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2046 struct hci_cp_remote_name_req cp;
2047 memset(&cp, 0, sizeof(cp));
2048 bacpy(&cp.bdaddr, &conn->dst);
2049 cp.pscan_rep_mode = 0x02;
2050 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2051 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2052 mgmt_device_connected(hdev, &conn->dst, conn->type,
2053 conn->dst_type, 0, NULL, 0,
2054 conn->dev_class);
2055
2056 if (!hci_outgoing_auth_needed(hdev, conn)) {
2057 conn->state = BT_CONNECTED;
2058 hci_proto_connect_cfm(conn, ev->status);
2059 hci_conn_drop(conn);
2060 }
2061
2062 unlock:
2063 hci_dev_unlock(hdev);
2064 }
2065
2066 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2067 {
2068 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2069 u8 status = skb->data[sizeof(*ev)];
2070 __u16 opcode;
2071
2072 skb_pull(skb, sizeof(*ev));
2073
2074 opcode = __le16_to_cpu(ev->opcode);
2075
2076 switch (opcode) {
2077 case HCI_OP_INQUIRY_CANCEL:
2078 hci_cc_inquiry_cancel(hdev, skb);
2079 break;
2080
2081 case HCI_OP_PERIODIC_INQ:
2082 hci_cc_periodic_inq(hdev, skb);
2083 break;
2084
2085 case HCI_OP_EXIT_PERIODIC_INQ:
2086 hci_cc_exit_periodic_inq(hdev, skb);
2087 break;
2088
2089 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2090 hci_cc_remote_name_req_cancel(hdev, skb);
2091 break;
2092
2093 case HCI_OP_ROLE_DISCOVERY:
2094 hci_cc_role_discovery(hdev, skb);
2095 break;
2096
2097 case HCI_OP_READ_LINK_POLICY:
2098 hci_cc_read_link_policy(hdev, skb);
2099 break;
2100
2101 case HCI_OP_WRITE_LINK_POLICY:
2102 hci_cc_write_link_policy(hdev, skb);
2103 break;
2104
2105 case HCI_OP_READ_DEF_LINK_POLICY:
2106 hci_cc_read_def_link_policy(hdev, skb);
2107 break;
2108
2109 case HCI_OP_WRITE_DEF_LINK_POLICY:
2110 hci_cc_write_def_link_policy(hdev, skb);
2111 break;
2112
2113 case HCI_OP_RESET:
2114 hci_cc_reset(hdev, skb);
2115 break;
2116
2117 case HCI_OP_WRITE_LOCAL_NAME:
2118 hci_cc_write_local_name(hdev, skb);
2119 break;
2120
2121 case HCI_OP_READ_LOCAL_NAME:
2122 hci_cc_read_local_name(hdev, skb);
2123 break;
2124
2125 case HCI_OP_WRITE_AUTH_ENABLE:
2126 hci_cc_write_auth_enable(hdev, skb);
2127 break;
2128
2129 case HCI_OP_WRITE_ENCRYPT_MODE:
2130 hci_cc_write_encrypt_mode(hdev, skb);
2131 break;
2132
2133 case HCI_OP_WRITE_SCAN_ENABLE:
2134 hci_cc_write_scan_enable(hdev, skb);
2135 break;
2136
2137 case HCI_OP_READ_CLASS_OF_DEV:
2138 hci_cc_read_class_of_dev(hdev, skb);
2139 break;
2140
2141 case HCI_OP_WRITE_CLASS_OF_DEV:
2142 hci_cc_write_class_of_dev(hdev, skb);
2143 break;
2144
2145 case HCI_OP_READ_VOICE_SETTING:
2146 hci_cc_read_voice_setting(hdev, skb);
2147 break;
2148
2149 case HCI_OP_WRITE_VOICE_SETTING:
2150 hci_cc_write_voice_setting(hdev, skb);
2151 break;
2152
2153 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2154 hci_cc_read_num_supported_iac(hdev, skb);
2155 break;
2156
2157 case HCI_OP_WRITE_SSP_MODE:
2158 hci_cc_write_ssp_mode(hdev, skb);
2159 break;
2160
2161 case HCI_OP_READ_LOCAL_VERSION:
2162 hci_cc_read_local_version(hdev, skb);
2163 break;
2164
2165 case HCI_OP_READ_LOCAL_COMMANDS:
2166 hci_cc_read_local_commands(hdev, skb);
2167 break;
2168
2169 case HCI_OP_READ_LOCAL_FEATURES:
2170 hci_cc_read_local_features(hdev, skb);
2171 break;
2172
2173 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2174 hci_cc_read_local_ext_features(hdev, skb);
2175 break;
2176
2177 case HCI_OP_READ_BUFFER_SIZE:
2178 hci_cc_read_buffer_size(hdev, skb);
2179 break;
2180
2181 case HCI_OP_READ_BD_ADDR:
2182 hci_cc_read_bd_addr(hdev, skb);
2183 break;
2184
2185 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2186 hci_cc_read_page_scan_activity(hdev, skb);
2187 break;
2188
2189 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2190 hci_cc_write_page_scan_activity(hdev, skb);
2191 break;
2192
2193 case HCI_OP_READ_PAGE_SCAN_TYPE:
2194 hci_cc_read_page_scan_type(hdev, skb);
2195 break;
2196
2197 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2198 hci_cc_write_page_scan_type(hdev, skb);
2199 break;
2200
2201 case HCI_OP_READ_DATA_BLOCK_SIZE:
2202 hci_cc_read_data_block_size(hdev, skb);
2203 break;
2204
2205 case HCI_OP_READ_FLOW_CONTROL_MODE:
2206 hci_cc_read_flow_control_mode(hdev, skb);
2207 break;
2208
2209 case HCI_OP_READ_LOCAL_AMP_INFO:
2210 hci_cc_read_local_amp_info(hdev, skb);
2211 break;
2212
2213 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2214 hci_cc_read_local_amp_assoc(hdev, skb);
2215 break;
2216
2217 case HCI_OP_READ_INQ_RSP_TX_POWER:
2218 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2219 break;
2220
2221 case HCI_OP_PIN_CODE_REPLY:
2222 hci_cc_pin_code_reply(hdev, skb);
2223 break;
2224
2225 case HCI_OP_PIN_CODE_NEG_REPLY:
2226 hci_cc_pin_code_neg_reply(hdev, skb);
2227 break;
2228
2229 case HCI_OP_READ_LOCAL_OOB_DATA:
2230 hci_cc_read_local_oob_data_reply(hdev, skb);
2231 break;
2232
2233 case HCI_OP_LE_READ_BUFFER_SIZE:
2234 hci_cc_le_read_buffer_size(hdev, skb);
2235 break;
2236
2237 case HCI_OP_LE_READ_LOCAL_FEATURES:
2238 hci_cc_le_read_local_features(hdev, skb);
2239 break;
2240
2241 case HCI_OP_LE_READ_ADV_TX_POWER:
2242 hci_cc_le_read_adv_tx_power(hdev, skb);
2243 break;
2244
2245 case HCI_OP_USER_CONFIRM_REPLY:
2246 hci_cc_user_confirm_reply(hdev, skb);
2247 break;
2248
2249 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2250 hci_cc_user_confirm_neg_reply(hdev, skb);
2251 break;
2252
2253 case HCI_OP_USER_PASSKEY_REPLY:
2254 hci_cc_user_passkey_reply(hdev, skb);
2255 break;
2256
2257 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2258 hci_cc_user_passkey_neg_reply(hdev, skb);
2259 break;
2260
2261 case HCI_OP_LE_SET_ADV_ENABLE:
2262 hci_cc_le_set_adv_enable(hdev, skb);
2263 break;
2264
2265 case HCI_OP_LE_SET_SCAN_ENABLE:
2266 hci_cc_le_set_scan_enable(hdev, skb);
2267 break;
2268
2269 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2270 hci_cc_le_read_white_list_size(hdev, skb);
2271 break;
2272
2273 case HCI_OP_LE_READ_SUPPORTED_STATES:
2274 hci_cc_le_read_supported_states(hdev, skb);
2275 break;
2276
2277 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2278 hci_cc_write_le_host_supported(hdev, skb);
2279 break;
2280
2281 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2282 hci_cc_write_remote_amp_assoc(hdev, skb);
2283 break;
2284
2285 default:
2286 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2287 break;
2288 }
2289
2290 if (opcode != HCI_OP_NOP)
2291 del_timer(&hdev->cmd_timer);
2292
2293 hci_req_cmd_complete(hdev, opcode, status);
2294
2295 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2296 atomic_set(&hdev->cmd_cnt, 1);
2297 if (!skb_queue_empty(&hdev->cmd_q))
2298 queue_work(hdev->workqueue, &hdev->cmd_work);
2299 }
2300 }
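/* A minimal, standalone sketch (plain C, not kernel code) of the command
 * flow control applied above: the Command Complete event carries a
 * Num_HCI_Command_Packets field (ev->ncmd); when it is non-zero the host
 * may send further commands, so the handler resets the credit counter and
 * kicks the command work queue. Names prefixed example_ are hypothetical.
 */
#include <stdbool.h>

struct example_cmd_queue {
	int credits;		/* commands the controller will accept */
	int pending;		/* commands queued by the host */
};

/* Refresh credits on a command event; return whether the transmit path
 * should be rescheduled (i.e. queue_work on the command work item).
 */
static bool example_on_cmd_event(struct example_cmd_queue *q, int ncmd,
				 bool resetting)
{
	if (!ncmd || resetting)
		return false;

	q->credits = 1;		/* this driver keeps one command in flight */
	return q->pending > 0;
}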
2301
2302 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2303 {
2304 struct hci_ev_cmd_status *ev = (void *) skb->data;
2305 __u16 opcode;
2306
2307 skb_pull(skb, sizeof(*ev));
2308
2309 opcode = __le16_to_cpu(ev->opcode);
2310
2311 switch (opcode) {
2312 case HCI_OP_INQUIRY:
2313 hci_cs_inquiry(hdev, ev->status);
2314 break;
2315
2316 case HCI_OP_CREATE_CONN:
2317 hci_cs_create_conn(hdev, ev->status);
2318 break;
2319
2320 case HCI_OP_ADD_SCO:
2321 hci_cs_add_sco(hdev, ev->status);
2322 break;
2323
2324 case HCI_OP_AUTH_REQUESTED:
2325 hci_cs_auth_requested(hdev, ev->status);
2326 break;
2327
2328 case HCI_OP_SET_CONN_ENCRYPT:
2329 hci_cs_set_conn_encrypt(hdev, ev->status);
2330 break;
2331
2332 case HCI_OP_REMOTE_NAME_REQ:
2333 hci_cs_remote_name_req(hdev, ev->status);
2334 break;
2335
2336 case HCI_OP_READ_REMOTE_FEATURES:
2337 hci_cs_read_remote_features(hdev, ev->status);
2338 break;
2339
2340 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2341 hci_cs_read_remote_ext_features(hdev, ev->status);
2342 break;
2343
2344 case HCI_OP_SETUP_SYNC_CONN:
2345 hci_cs_setup_sync_conn(hdev, ev->status);
2346 break;
2347
2348 case HCI_OP_SNIFF_MODE:
2349 hci_cs_sniff_mode(hdev, ev->status);
2350 break;
2351
2352 case HCI_OP_EXIT_SNIFF_MODE:
2353 hci_cs_exit_sniff_mode(hdev, ev->status);
2354 break;
2355
2356 case HCI_OP_DISCONNECT:
2357 hci_cs_disconnect(hdev, ev->status);
2358 break;
2359
2360 case HCI_OP_CREATE_PHY_LINK:
2361 hci_cs_create_phylink(hdev, ev->status);
2362 break;
2363
2364 case HCI_OP_ACCEPT_PHY_LINK:
2365 hci_cs_accept_phylink(hdev, ev->status);
2366 break;
2367
2368 default:
2369 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2370 break;
2371 }
2372
2373 if (opcode != HCI_OP_NOP)
2374 del_timer(&hdev->cmd_timer);
2375
2376 if (ev->status ||
2377 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2378 hci_req_cmd_complete(hdev, opcode, ev->status);
2379
2380 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2381 atomic_set(&hdev->cmd_cnt, 1);
2382 if (!skb_queue_empty(&hdev->cmd_q))
2383 queue_work(hdev->workqueue, &hdev->cmd_work);
2384 }
2385 }
2386
2387 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388 {
2389 struct hci_ev_role_change *ev = (void *) skb->data;
2390 struct hci_conn *conn;
2391
2392 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2393
2394 hci_dev_lock(hdev);
2395
2396 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2397 if (conn) {
2398 if (!ev->status) {
2399 if (ev->role)
2400 conn->link_mode &= ~HCI_LM_MASTER;
2401 else
2402 conn->link_mode |= HCI_LM_MASTER;
2403 }
2404
2405 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2406
2407 hci_role_switch_cfm(conn, ev->status, ev->role);
2408 }
2409
2410 hci_dev_unlock(hdev);
2411 }
2412
2413 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2414 {
2415 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2416 int i;
2417
2418 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2419 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2420 return;
2421 }
2422
2423 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2424 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2425 BT_DBG("%s bad parameters", hdev->name);
2426 return;
2427 }
2428
2429 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2430
2431 for (i = 0; i < ev->num_hndl; i++) {
2432 struct hci_comp_pkts_info *info = &ev->handles[i];
2433 struct hci_conn *conn;
2434 __u16 handle, count;
2435
2436 handle = __le16_to_cpu(info->handle);
2437 count = __le16_to_cpu(info->count);
2438
2439 conn = hci_conn_hash_lookup_handle(hdev, handle);
2440 if (!conn)
2441 continue;
2442
2443 conn->sent -= count;
2444
2445 switch (conn->type) {
2446 case ACL_LINK:
2447 hdev->acl_cnt += count;
2448 if (hdev->acl_cnt > hdev->acl_pkts)
2449 hdev->acl_cnt = hdev->acl_pkts;
2450 break;
2451
2452 case LE_LINK:
2453 if (hdev->le_pkts) {
2454 hdev->le_cnt += count;
2455 if (hdev->le_cnt > hdev->le_pkts)
2456 hdev->le_cnt = hdev->le_pkts;
2457 } else {
2458 hdev->acl_cnt += count;
2459 if (hdev->acl_cnt > hdev->acl_pkts)
2460 hdev->acl_cnt = hdev->acl_pkts;
2461 }
2462 break;
2463
2464 case SCO_LINK:
2465 hdev->sco_cnt += count;
2466 if (hdev->sco_cnt > hdev->sco_pkts)
2467 hdev->sco_cnt = hdev->sco_pkts;
2468 break;
2469
2470 default:
2471 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2472 break;
2473 }
2474 }
2475
2476 queue_work(hdev->workqueue, &hdev->tx_work);
2477 }
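/* A minimal, standalone sketch (plain C, not kernel code) of the credit
 * accounting done above, assuming the spec wire format: a one-byte handle
 * count followed by num_hndl pairs of little-endian u16 {handle, count}.
 * Names prefixed example_ are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct example_quota {
	unsigned int cnt;	/* credits currently available */
	unsigned int pkts;	/* maximum the controller reported */
};

/* Return credits for completed packets, never exceeding the maximum. */
static void example_credit_completed(struct example_quota *q, uint16_t count)
{
	q->cnt += count;
	if (q->cnt > q->pkts)
		q->cnt = q->pkts;
}

/* Parse one Number Of Completed Packets payload; returns the number of
 * handle entries consumed, or -1 if the buffer is too short.
 */
static int example_parse_num_comp_pkts(const uint8_t *data, size_t len,
				       struct example_quota *acl)
{
	size_t i, num_hndl;

	if (len < 1)
		return -1;

	num_hndl = data[0];
	if (len < 1 + num_hndl * 4)
		return -1;

	for (i = 0; i < num_hndl; i++) {
		const uint8_t *e = data + 1 + i * 4;
		uint16_t count = (uint16_t)(e[2] | (e[3] << 8));

		/* e[0] | (e[1] << 8) is the connection handle, used in the
		 * real handler to find the matching hci_conn. */
		example_credit_completed(acl, count);
	}

	return (int)num_hndl;
}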
2478
2479 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2480 __u16 handle)
2481 {
2482 struct hci_chan *chan;
2483
2484 switch (hdev->dev_type) {
2485 case HCI_BREDR:
2486 return hci_conn_hash_lookup_handle(hdev, handle);
2487 case HCI_AMP:
2488 chan = hci_chan_lookup_handle(hdev, handle);
2489 if (chan)
2490 return chan->conn;
2491 break;
2492 default:
2493 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2494 break;
2495 }
2496
2497 return NULL;
2498 }
2499
2500 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2501 {
2502 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2503 int i;
2504
2505 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2506 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2507 return;
2508 }
2509
2510 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2511 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2512 BT_DBG("%s bad parameters", hdev->name);
2513 return;
2514 }
2515
2516 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2517 ev->num_hndl);
2518
2519 for (i = 0; i < ev->num_hndl; i++) {
2520 struct hci_comp_blocks_info *info = &ev->handles[i];
2521 struct hci_conn *conn = NULL;
2522 __u16 handle, block_count;
2523
2524 handle = __le16_to_cpu(info->handle);
2525 block_count = __le16_to_cpu(info->blocks);
2526
2527 conn = __hci_conn_lookup_handle(hdev, handle);
2528 if (!conn)
2529 continue;
2530
2531 conn->sent -= block_count;
2532
2533 switch (conn->type) {
2534 case ACL_LINK:
2535 case AMP_LINK:
2536 hdev->block_cnt += block_count;
2537 if (hdev->block_cnt > hdev->num_blocks)
2538 hdev->block_cnt = hdev->num_blocks;
2539 break;
2540
2541 default:
2542 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2543 break;
2544 }
2545 }
2546
2547 queue_work(hdev->workqueue, &hdev->tx_work);
2548 }
2549
2550 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2551 {
2552 struct hci_ev_mode_change *ev = (void *) skb->data;
2553 struct hci_conn *conn;
2554
2555 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2556
2557 hci_dev_lock(hdev);
2558
2559 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2560 if (conn) {
2561 conn->mode = ev->mode;
2562 conn->interval = __le16_to_cpu(ev->interval);
2563
2564 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2565 &conn->flags)) {
2566 if (conn->mode == HCI_CM_ACTIVE)
2567 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2568 else
2569 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2570 }
2571
2572 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2573 hci_sco_setup(conn, ev->status);
2574 }
2575
2576 hci_dev_unlock(hdev);
2577 }
2578
2579 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2580 {
2581 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2582 struct hci_conn *conn;
2583
2584 BT_DBG("%s", hdev->name);
2585
2586 hci_dev_lock(hdev);
2587
2588 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2589 if (!conn)
2590 goto unlock;
2591
2592 if (conn->state == BT_CONNECTED) {
2593 hci_conn_hold(conn);
2594 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2595 hci_conn_drop(conn);
2596 }
2597
2598 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2599 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2600 sizeof(ev->bdaddr), &ev->bdaddr);
2601 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2602 u8 secure;
2603
2604 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2605 secure = 1;
2606 else
2607 secure = 0;
2608
2609 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2610 }
2611
2612 unlock:
2613 hci_dev_unlock(hdev);
2614 }
2615
2616 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2617 {
2618 struct hci_ev_link_key_req *ev = (void *) skb->data;
2619 struct hci_cp_link_key_reply cp;
2620 struct hci_conn *conn;
2621 struct link_key *key;
2622
2623 BT_DBG("%s", hdev->name);
2624
2625 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2626 return;
2627
2628 hci_dev_lock(hdev);
2629
2630 key = hci_find_link_key(hdev, &ev->bdaddr);
2631 if (!key) {
2632 BT_DBG("%s link key not found for %pMR", hdev->name,
2633 &ev->bdaddr);
2634 goto not_found;
2635 }
2636
2637 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2638 &ev->bdaddr);
2639
2640 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2641 key->type == HCI_LK_DEBUG_COMBINATION) {
2642 BT_DBG("%s ignoring debug key", hdev->name);
2643 goto not_found;
2644 }
2645
2646 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2647 if (conn) {
2648 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2649 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2650 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2651 goto not_found;
2652 }
2653
2654 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2655 conn->pending_sec_level == BT_SECURITY_HIGH) {
2656 BT_DBG("%s ignoring key unauthenticated for high security",
2657 hdev->name);
2658 goto not_found;
2659 }
2660
2661 conn->key_type = key->type;
2662 conn->pin_length = key->pin_len;
2663 }
2664
2665 bacpy(&cp.bdaddr, &ev->bdaddr);
2666 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2667
2668 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2669
2670 hci_dev_unlock(hdev);
2671
2672 return;
2673
2674 not_found:
2675 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2676 hci_dev_unlock(hdev);
2677 }
2678
2679 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2680 {
2681 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2682 struct hci_conn *conn;
2683 u8 pin_len = 0;
2684
2685 BT_DBG("%s", hdev->name);
2686
2687 hci_dev_lock(hdev);
2688
2689 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2690 if (conn) {
2691 hci_conn_hold(conn);
2692 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2693 pin_len = conn->pin_length;
2694
2695 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2696 conn->key_type = ev->key_type;
2697
2698 hci_conn_drop(conn);
2699 }
2700
2701 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2702 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2703 ev->key_type, pin_len);
2704
2705 hci_dev_unlock(hdev);
2706 }
2707
2708 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2709 {
2710 struct hci_ev_clock_offset *ev = (void *) skb->data;
2711 struct hci_conn *conn;
2712
2713 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2714
2715 hci_dev_lock(hdev);
2716
2717 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2718 if (conn && !ev->status) {
2719 struct inquiry_entry *ie;
2720
2721 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2722 if (ie) {
2723 ie->data.clock_offset = ev->clock_offset;
2724 ie->timestamp = jiffies;
2725 }
2726 }
2727
2728 hci_dev_unlock(hdev);
2729 }
2730
2731 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2732 {
2733 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2734 struct hci_conn *conn;
2735
2736 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2737
2738 hci_dev_lock(hdev);
2739
2740 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2741 if (conn && !ev->status)
2742 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2743
2744 hci_dev_unlock(hdev);
2745 }
2746
2747 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2748 {
2749 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2750 struct inquiry_entry *ie;
2751
2752 BT_DBG("%s", hdev->name);
2753
2754 hci_dev_lock(hdev);
2755
2756 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2757 if (ie) {
2758 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2759 ie->timestamp = jiffies;
2760 }
2761
2762 hci_dev_unlock(hdev);
2763 }
2764
2765 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2766 struct sk_buff *skb)
2767 {
2768 struct inquiry_data data;
2769 int num_rsp = *((__u8 *) skb->data);
2770 bool name_known, ssp;
2771
2772 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2773
2774 if (!num_rsp)
2775 return;
2776
2777 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2778 return;
2779
2780 hci_dev_lock(hdev);
2781
2782 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2783 struct inquiry_info_with_rssi_and_pscan_mode *info;
2784 info = (void *) (skb->data + 1);
2785
2786 for (; num_rsp; num_rsp--, info++) {
2787 bacpy(&data.bdaddr, &info->bdaddr);
2788 data.pscan_rep_mode = info->pscan_rep_mode;
2789 data.pscan_period_mode = info->pscan_period_mode;
2790 data.pscan_mode = info->pscan_mode;
2791 memcpy(data.dev_class, info->dev_class, 3);
2792 data.clock_offset = info->clock_offset;
2793 data.rssi = info->rssi;
2794 data.ssp_mode = 0x00;
2795
2796 name_known = hci_inquiry_cache_update(hdev, &data,
2797 false, &ssp);
2798 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2799 info->dev_class, info->rssi,
2800 !name_known, ssp, NULL, 0);
2801 }
2802 } else {
2803 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2804
2805 for (; num_rsp; num_rsp--, info++) {
2806 bacpy(&data.bdaddr, &info->bdaddr);
2807 data.pscan_rep_mode = info->pscan_rep_mode;
2808 data.pscan_period_mode = info->pscan_period_mode;
2809 data.pscan_mode = 0x00;
2810 memcpy(data.dev_class, info->dev_class, 3);
2811 data.clock_offset = info->clock_offset;
2812 data.rssi = info->rssi;
2813 data.ssp_mode = 0x00;
2814 name_known = hci_inquiry_cache_update(hdev, &data,
2815 false, &ssp);
2816 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2817 info->dev_class, info->rssi,
2818 !name_known, ssp, NULL, 0);
2819 }
2820 }
2821
2822 hci_dev_unlock(hdev);
2823 }
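/* A minimal, standalone sketch (plain C, not kernel code) of how the
 * handler above picks between the two response layouts purely by size:
 * assuming packed structs, inquiry_info_with_rssi is 14 bytes per response
 * and the pscan_mode variant is 15; dividing the payload (minus the leading
 * response-count byte) by num_rsp selects the layout. The enum and function
 * names are hypothetical, and unexpected sizes are treated as invalid here
 * as a simplification.
 */
#include <stddef.h>
#include <stdint.h>

enum example_rssi_layout {
	EXAMPLE_LAYOUT_INVALID,
	EXAMPLE_LAYOUT_WITH_RSSI,		/* 14 bytes per response */
	EXAMPLE_LAYOUT_WITH_RSSI_PSCAN_MODE,	/* 15 bytes per response */
};

static enum example_rssi_layout
example_detect_layout(size_t payload_len, uint8_t num_rsp)
{
	size_t per_rsp;

	if (!num_rsp || payload_len < 1)
		return EXAMPLE_LAYOUT_INVALID;

	per_rsp = (payload_len - 1) / num_rsp;

	if (per_rsp == 14)
		return EXAMPLE_LAYOUT_WITH_RSSI;
	if (per_rsp == 15)
		return EXAMPLE_LAYOUT_WITH_RSSI_PSCAN_MODE;

	return EXAMPLE_LAYOUT_INVALID;
}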
2824
2825 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2826 struct sk_buff *skb)
2827 {
2828 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2829 struct hci_conn *conn;
2830
2831 BT_DBG("%s", hdev->name);
2832
2833 hci_dev_lock(hdev);
2834
2835 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2836 if (!conn)
2837 goto unlock;
2838
2839 if (ev->page < HCI_MAX_PAGES)
2840 memcpy(conn->features[ev->page], ev->features, 8);
2841
2842 if (!ev->status && ev->page == 0x01) {
2843 struct inquiry_entry *ie;
2844
2845 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2846 if (ie)
2847 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2848
2849 if (ev->features[0] & LMP_HOST_SSP) {
2850 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2851 } else {
2852 /* The Bluetooth specification mandates that Extended
2853 * Inquiry Results are only used when Secure Simple
2854 * Pairing is enabled, but some devices violate this.
2855 *
2856 * To make these devices work, the internal SSP
2857 * enabled flag needs to be cleared if the remote host
2858 * features do not indicate SSP support.
2859 */
2860 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2861 }
2862 }
2863
2864 if (conn->state != BT_CONFIG)
2865 goto unlock;
2866
2867 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2868 struct hci_cp_remote_name_req cp;
2869 memset(&cp, 0, sizeof(cp));
2870 bacpy(&cp.bdaddr, &conn->dst);
2871 cp.pscan_rep_mode = 0x02;
2872 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2873 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2874 mgmt_device_connected(hdev, &conn->dst, conn->type,
2875 conn->dst_type, 0, NULL, 0,
2876 conn->dev_class);
2877
2878 if (!hci_outgoing_auth_needed(hdev, conn)) {
2879 conn->state = BT_CONNECTED;
2880 hci_proto_connect_cfm(conn, ev->status);
2881 hci_conn_drop(conn);
2882 }
2883
2884 unlock:
2885 hci_dev_unlock(hdev);
2886 }
2887
2888 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2889 struct sk_buff *skb)
2890 {
2891 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2892 struct hci_conn *conn;
2893
2894 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2895
2896 hci_dev_lock(hdev);
2897
2898 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2899 if (!conn) {
2900 if (ev->link_type == ESCO_LINK)
2901 goto unlock;
2902
2903 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2904 if (!conn)
2905 goto unlock;
2906
2907 conn->type = SCO_LINK;
2908 }
2909
2910 switch (ev->status) {
2911 case 0x00:
2912 conn->handle = __le16_to_cpu(ev->handle);
2913 conn->state = BT_CONNECTED;
2914
2915 hci_conn_add_sysfs(conn);
2916 break;
2917
2918 case 0x0d: /* Connection Rejected due to Limited Resources */
2919 case 0x11: /* Unsupported Feature or Parameter Value */
2920 case 0x1c: /* SCO interval rejected */
2921 case 0x1a: /* Unsupported Remote Feature */
2922 case 0x1f: /* Unspecified error */
2923 if (conn->out) {
2924 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2925 (hdev->esco_type & EDR_ESCO_MASK);
2926 if (hci_setup_sync(conn, conn->link->handle))
2927 goto unlock;
2928 }
2929 /* fall through */
2930
2931 default:
2932 conn->state = BT_CLOSED;
2933 break;
2934 }
2935
2936 hci_proto_connect_cfm(conn, ev->status);
2937 if (ev->status)
2938 hci_conn_del(conn);
2939
2940 unlock:
2941 hci_dev_unlock(hdev);
2942 }
2943
2944 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2945 struct sk_buff *skb)
2946 {
2947 struct inquiry_data data;
2948 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2949 int num_rsp = *((__u8 *) skb->data);
2950 size_t eir_len;
2951
2952 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2953
2954 if (!num_rsp)
2955 return;
2956
2957 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2958 return;
2959
2960 hci_dev_lock(hdev);
2961
2962 for (; num_rsp; num_rsp--, info++) {
2963 bool name_known, ssp;
2964
2965 bacpy(&data.bdaddr, &info->bdaddr);
2966 data.pscan_rep_mode = info->pscan_rep_mode;
2967 data.pscan_period_mode = info->pscan_period_mode;
2968 data.pscan_mode = 0x00;
2969 memcpy(data.dev_class, info->dev_class, 3);
2970 data.clock_offset = info->clock_offset;
2971 data.rssi = info->rssi;
2972 data.ssp_mode = 0x01;
2973
2974 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2975 name_known = eir_has_data_type(info->data,
2976 sizeof(info->data),
2977 EIR_NAME_COMPLETE);
2978 else
2979 name_known = true;
2980
2981 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2982 &ssp);
2983 eir_len = eir_get_length(info->data, sizeof(info->data));
2984 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2985 info->dev_class, info->rssi, !name_known,
2986 ssp, info->data, eir_len);
2987 }
2988
2989 hci_dev_unlock(hdev);
2990 }
2991
2992 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2993 struct sk_buff *skb)
2994 {
2995 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
2996 struct hci_conn *conn;
2997
2998 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
2999 __le16_to_cpu(ev->handle));
3000
3001 hci_dev_lock(hdev);
3002
3003 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3004 if (!conn)
3005 goto unlock;
3006
3007 if (!ev->status)
3008 conn->sec_level = conn->pending_sec_level;
3009
3010 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3011
3012 if (ev->status && conn->state == BT_CONNECTED) {
3013 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3014 hci_conn_drop(conn);
3015 goto unlock;
3016 }
3017
3018 if (conn->state == BT_CONFIG) {
3019 if (!ev->status)
3020 conn->state = BT_CONNECTED;
3021
3022 hci_proto_connect_cfm(conn, ev->status);
3023 hci_conn_drop(conn);
3024 } else {
3025 hci_auth_cfm(conn, ev->status);
3026
3027 hci_conn_hold(conn);
3028 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3029 hci_conn_drop(conn);
3030 }
3031
3032 unlock:
3033 hci_dev_unlock(hdev);
3034 }
3035
3036 static u8 hci_get_auth_req(struct hci_conn *conn)
3037 {
3038 /* If the remote requests dedicated bonding, follow that lead */
3039 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3040 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3041 /* If both remote and local IO capabilities allow MITM
3042 * protection, then require it; otherwise don't. */
3043 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3044 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3045 return HCI_AT_DEDICATED_BONDING;
3046 else
3047 return HCI_AT_DEDICATED_BONDING_MITM;
3048 }
3049
3050 /* If the remote requests no-bonding, follow that lead */
3051 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3052 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3053 return conn->remote_auth | (conn->auth_type & 0x01);
3054
3055 return conn->auth_type;
3056 }
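/* Worked examples (illustrative only, not part of the original file) of the
 * policy hci_get_auth_req() implements above:
 *
 *   remote_auth = DEDICATED_BONDING_MITM, both sides have usable IO
 *       -> HCI_AT_DEDICATED_BONDING_MITM (MITM required)
 *   remote_auth = DEDICATED_BONDING, local io = NoInputNoOutput
 *       -> HCI_AT_DEDICATED_BONDING (MITM impossible without IO)
 *   remote_auth = NO_BONDING, local auth_type has the MITM bit set
 *       -> HCI_AT_NO_BONDING_MITM (no bonding, MITM bit preserved)
 *   any other remote_auth
 *       -> conn->auth_type returned unchanged
 */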
3057
3058 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3059 {
3060 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3061 struct hci_conn *conn;
3062
3063 BT_DBG("%s", hdev->name);
3064
3065 hci_dev_lock(hdev);
3066
3067 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3068 if (!conn)
3069 goto unlock;
3070
3071 hci_conn_hold(conn);
3072
3073 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3074 goto unlock;
3075
3076 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3077 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3078 struct hci_cp_io_capability_reply cp;
3079
3080 bacpy(&cp.bdaddr, &ev->bdaddr);
3081 /* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3082 * as KeyboardDisplay is not supported by the BT spec. */
3083 cp.capability = (conn->io_capability == 0x04) ?
3084 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3085 conn->auth_type = hci_get_auth_req(conn);
3086 cp.authentication = conn->auth_type;
3087
3088 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3089 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3090 cp.oob_data = 0x01;
3091 else
3092 cp.oob_data = 0x00;
3093
3094 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3095 sizeof(cp), &cp);
3096 } else {
3097 struct hci_cp_io_capability_neg_reply cp;
3098
3099 bacpy(&cp.bdaddr, &ev->bdaddr);
3100 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3101
3102 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3103 sizeof(cp), &cp);
3104 }
3105
3106 unlock:
3107 hci_dev_unlock(hdev);
3108 }
3109
3110 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3111 {
3112 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3113 struct hci_conn *conn;
3114
3115 BT_DBG("%s", hdev->name);
3116
3117 hci_dev_lock(hdev);
3118
3119 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3120 if (!conn)
3121 goto unlock;
3122
3123 conn->remote_cap = ev->capability;
3124 conn->remote_auth = ev->authentication;
3125 if (ev->oob_data)
3126 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3127
3128 unlock:
3129 hci_dev_unlock(hdev);
3130 }
3131
3132 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3133 struct sk_buff *skb)
3134 {
3135 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3136 int loc_mitm, rem_mitm, confirm_hint = 0;
3137 struct hci_conn *conn;
3138
3139 BT_DBG("%s", hdev->name);
3140
3141 hci_dev_lock(hdev);
3142
3143 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3144 goto unlock;
3145
3146 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3147 if (!conn)
3148 goto unlock;
3149
3150 loc_mitm = (conn->auth_type & 0x01);
3151 rem_mitm = (conn->remote_auth & 0x01);
3152
3153 /* If we require MITM but the remote device can't provide that
3154 * (it has NoInputNoOutput) then reject the confirmation
3155 * request. The only exception is when we're dedicated bonding
3156 * initiators (connect_cfm_cb set) since then we always have the MITM
3157 * bit set. */
3158 if (!conn->connect_cfm_cb && loc_mitm &&
3159 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3160 BT_DBG("Rejecting request: remote device can't provide MITM");
3161 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3162 sizeof(ev->bdaddr), &ev->bdaddr);
3163 goto unlock;
3164 }
3165
3166 /* If no side requires MITM protection, auto-accept */
3167 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3168 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3169
3170 /* If we're not the initiators, request authorization to
3171 * proceed from user space (mgmt_user_confirm with
3172 * confirm_hint set to 1). */
3173 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3174 BT_DBG("Confirming auto-accept as acceptor");
3175 confirm_hint = 1;
3176 goto confirm;
3177 }
3178
3179 BT_DBG("Auto-accept of user confirmation with %ums delay",
3180 hdev->auto_accept_delay);
3181
3182 if (hdev->auto_accept_delay > 0) {
3183 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3184 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3185 goto unlock;
3186 }
3187
3188 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3189 sizeof(ev->bdaddr), &ev->bdaddr);
3190 goto unlock;
3191 }
3192
3193 confirm:
3194 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3195 confirm_hint);
3196
3197 unlock:
3198 hci_dev_unlock(hdev);
3199 }
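/* A minimal, standalone sketch (plain C, not kernel code) of the decision
 * tree the handler above applies to a User Confirmation Request. The enum
 * values, parameter names and helper are hypothetical; in the kernel the
 * "dedicated bonding initiator" check is connect_cfm_cb and the
 * "we initiated authentication" check is the HCI_CONN_AUTH_PEND flag.
 */
#include <stdbool.h>

enum example_confirm_action {
	EXAMPLE_REJECT,		/* User Confirmation Negative Reply */
	EXAMPLE_ASK_USER,	/* forward to user space via mgmt */
	EXAMPLE_AUTO_ACCEPT,	/* User Confirmation Reply (maybe delayed) */
};

static enum example_confirm_action
example_confirm_decision(bool loc_mitm, bool rem_mitm,
			 bool remote_has_io, bool local_has_io,
			 bool dedicated_bonding_initiator,
			 bool we_initiated_auth)
{
	/* We require MITM but the remote has no IO, so MITM protection is
	 * impossible: reject, unless we are the dedicated bonding
	 * initiator, in which case the MITM bit is always set and does not
	 * reflect a hard requirement.
	 */
	if (!dedicated_bonding_initiator && loc_mitm && !remote_has_io)
		return EXAMPLE_REJECT;

	/* Neither side both requires MITM and can achieve it: auto-accept.
	 * A pure acceptor still asks user space for authorization first.
	 */
	if ((!loc_mitm || !remote_has_io) && (!rem_mitm || !local_has_io))
		return we_initiated_auth ? EXAMPLE_AUTO_ACCEPT
					 : EXAMPLE_ASK_USER;

	/* Otherwise let the user compare and confirm the passkey. */
	return EXAMPLE_ASK_USER;
}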
3200
3201 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3202 struct sk_buff *skb)
3203 {
3204 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3205
3206 BT_DBG("%s", hdev->name);
3207
3208 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3209 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3210 }
3211
3212 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3213 struct sk_buff *skb)
3214 {
3215 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3216 struct hci_conn *conn;
3217
3218 BT_DBG("%s", hdev->name);
3219
3220 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3221 if (!conn)
3222 return;
3223
3224 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3225 conn->passkey_entered = 0;
3226
3227 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3228 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3229 conn->dst_type, conn->passkey_notify,
3230 conn->passkey_entered);
3231 }
3232
3233 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3234 {
3235 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3236 struct hci_conn *conn;
3237
3238 BT_DBG("%s", hdev->name);
3239
3240 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3241 if (!conn)
3242 return;
3243
3244 switch (ev->type) {
3245 case HCI_KEYPRESS_STARTED:
3246 conn->passkey_entered = 0;
3247 return;
3248
3249 case HCI_KEYPRESS_ENTERED:
3250 conn->passkey_entered++;
3251 break;
3252
3253 case HCI_KEYPRESS_ERASED:
3254 conn->passkey_entered--;
3255 break;
3256
3257 case HCI_KEYPRESS_CLEARED:
3258 conn->passkey_entered = 0;
3259 break;
3260
3261 case HCI_KEYPRESS_COMPLETED:
3262 return;
3263 }
3264
3265 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3266 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3267 conn->dst_type, conn->passkey_notify,
3268 conn->passkey_entered);
3269 }
3270
3271 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3272 struct sk_buff *skb)
3273 {
3274 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3275 struct hci_conn *conn;
3276
3277 BT_DBG("%s", hdev->name);
3278
3279 hci_dev_lock(hdev);
3280
3281 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3282 if (!conn)
3283 goto unlock;
3284
3285 /* To avoid duplicate auth_failed events to user space we check
3286 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3287 * initiated the authentication. A traditional auth_complete
3288 * event is always produced when acting as initiator and is also
3289 * mapped to the mgmt_auth_failed event. */
3290 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3291 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3292 ev->status);
3293
3294 hci_conn_drop(conn);
3295
3296 unlock:
3297 hci_dev_unlock(hdev);
3298 }
3299
3300 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3301 struct sk_buff *skb)
3302 {
3303 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3304 struct inquiry_entry *ie;
3305 struct hci_conn *conn;
3306
3307 BT_DBG("%s", hdev->name);
3308
3309 hci_dev_lock(hdev);
3310
3311 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3312 if (conn)
3313 memcpy(conn->features[1], ev->features, 8);
3314
3315 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3316 if (ie)
3317 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3318
3319 hci_dev_unlock(hdev);
3320 }
3321
3322 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3323 struct sk_buff *skb)
3324 {
3325 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3326 struct oob_data *data;
3327
3328 BT_DBG("%s", hdev->name);
3329
3330 hci_dev_lock(hdev);
3331
3332 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3333 goto unlock;
3334
3335 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3336 if (data) {
3337 struct hci_cp_remote_oob_data_reply cp;
3338
3339 bacpy(&cp.bdaddr, &ev->bdaddr);
3340 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3341 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3342
3343 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3344 &cp);
3345 } else {
3346 struct hci_cp_remote_oob_data_neg_reply cp;
3347
3348 bacpy(&cp.bdaddr, &ev->bdaddr);
3349 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3350 &cp);
3351 }
3352
3353 unlock:
3354 hci_dev_unlock(hdev);
3355 }
3356
3357 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3358 struct sk_buff *skb)
3359 {
3360 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3361 struct hci_conn *hcon, *bredr_hcon;
3362
3363 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3364 ev->status);
3365
3366 hci_dev_lock(hdev);
3367
3368 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3369 if (!hcon) {
3370 hci_dev_unlock(hdev);
3371 return;
3372 }
3373
3374 if (ev->status) {
3375 hci_conn_del(hcon);
3376 hci_dev_unlock(hdev);
3377 return;
3378 }
3379
3380 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3381
3382 hcon->state = BT_CONNECTED;
3383 bacpy(&hcon->dst, &bredr_hcon->dst);
3384
3385 hci_conn_hold(hcon);
3386 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3387 hci_conn_drop(hcon);
3388
3389 hci_conn_add_sysfs(hcon);
3390
3391 amp_physical_cfm(bredr_hcon, hcon);
3392
3393 hci_dev_unlock(hdev);
3394 }
3395
3396 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3397 {
3398 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3399 struct hci_conn *hcon;
3400 struct hci_chan *hchan;
3401 struct amp_mgr *mgr;
3402
3403 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3404 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3405 ev->status);
3406
3407 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3408 if (!hcon)
3409 return;
3410
3411 /* Create AMP hchan */
3412 hchan = hci_chan_create(hcon);
3413 if (!hchan)
3414 return;
3415
3416 hchan->handle = le16_to_cpu(ev->handle);
3417
3418 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3419
3420 mgr = hcon->amp_mgr;
3421 if (mgr && mgr->bredr_chan) {
3422 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3423
3424 l2cap_chan_lock(bredr_chan);
3425
3426 bredr_chan->conn->mtu = hdev->block_mtu;
3427 l2cap_logical_cfm(bredr_chan, hchan, 0);
3428 hci_conn_hold(hcon);
3429
3430 l2cap_chan_unlock(bredr_chan);
3431 }
3432 }
3433
3434 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3435 struct sk_buff *skb)
3436 {
3437 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3438 struct hci_chan *hchan;
3439
3440 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3441 le16_to_cpu(ev->handle), ev->status);
3442
3443 if (ev->status)
3444 return;
3445
3446 hci_dev_lock(hdev);
3447
3448 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3449 if (!hchan)
3450 goto unlock;
3451
3452 amp_destroy_logical_link(hchan, ev->reason);
3453
3454 unlock:
3455 hci_dev_unlock(hdev);
3456 }
3457
3458 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3459 struct sk_buff *skb)
3460 {
3461 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3462 struct hci_conn *hcon;
3463
3464 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3465
3466 if (ev->status)
3467 return;
3468
3469 hci_dev_lock(hdev);
3470
3471 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3472 if (hcon) {
3473 hcon->state = BT_CLOSED;
3474 hci_conn_del(hcon);
3475 }
3476
3477 hci_dev_unlock(hdev);
3478 }
3479
3480 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3481 {
3482 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3483 struct hci_conn *conn;
3484
3485 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3486
3487 hci_dev_lock(hdev);
3488
3489 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3490 if (!conn) {
3491 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3492 if (!conn) {
3493 BT_ERR("No memory for new connection");
3494 goto unlock;
3495 }
3496
3497 conn->dst_type = ev->bdaddr_type;
3498
3499 /* The advertising parameters for own address type
3500 * define which source address and source address
3501 * type this connection has.
3502 */
3503 if (bacmp(&conn->src, BDADDR_ANY)) {
3504 conn->src_type = ADDR_LE_DEV_PUBLIC;
3505 } else {
3506 bacpy(&conn->src, &hdev->static_addr);
3507 conn->src_type = ADDR_LE_DEV_RANDOM;
3508 }
3509
3510 if (ev->role == LE_CONN_ROLE_MASTER) {
3511 conn->out = true;
3512 conn->link_mode |= HCI_LM_MASTER;
3513 }
3514 }
3515
3516 if (ev->status) {
3517 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3518 conn->dst_type, ev->status);
3519 hci_proto_connect_cfm(conn, ev->status);
3520 conn->state = BT_CLOSED;
3521 hci_conn_del(conn);
3522 goto unlock;
3523 }
3524
3525 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3526 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3527 conn->dst_type, 0, NULL, 0, NULL);
3528
3529 conn->sec_level = BT_SECURITY_LOW;
3530 conn->handle = __le16_to_cpu(ev->handle);
3531 conn->state = BT_CONNECTED;
3532
3533 hci_conn_add_sysfs(conn);
3534
3535 hci_proto_connect_cfm(conn, ev->status);
3536
3537 unlock:
3538 hci_dev_unlock(hdev);
3539 }
3540
3541 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3542 {
3543 u8 num_reports = skb->data[0];
3544 void *ptr = &skb->data[1];
3545 s8 rssi;
3546
3547 while (num_reports--) {
3548 struct hci_ev_le_advertising_info *ev = ptr;
3549
3550 rssi = ev->data[ev->length];
3551 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3552 NULL, rssi, 0, 1, ev->data, ev->length);
3553
3554 ptr += sizeof(*ev) + ev->length + 1;
3555 }
3556 }
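/* A minimal, standalone sketch (plain C, not kernel code) of one LE
 * Advertising Report entry as consumed by the loop above: a fixed header,
 * `length` bytes of advertising data, then a trailing signed RSSI byte.
 * That is why the handler reads ev->data[ev->length] for the RSSI and
 * advances the cursor by sizeof(*ev) + ev->length + 1. The struct and
 * function names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct example_le_adv_info {
	uint8_t evt_type;
	uint8_t bdaddr_type;
	uint8_t bdaddr[6];
	uint8_t length;		/* number of AD bytes that follow */
	uint8_t data[];		/* AD structures, then one int8_t RSSI */
};

/* Walk a report payload (first byte = number of reports) and return how
 * many complete entries it contains.
 */
static int example_count_adv_reports(const uint8_t *buf, size_t len)
{
	size_t off = 1;
	int n = 0;

	if (len < 1)
		return -1;

	while (off + sizeof(struct example_le_adv_info) + 1 <= len) {
		const struct example_le_adv_info *info =
			(const void *)(buf + off);
		size_t entry = sizeof(*info) + info->length + 1;

		if (off + entry > len)
			break;

		/* The RSSI would be (int8_t)info->data[info->length]. */
		off += entry;
		n++;
	}

	return n;
}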
3557
3558 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3559 {
3560 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3561 struct hci_cp_le_ltk_reply cp;
3562 struct hci_cp_le_ltk_neg_reply neg;
3563 struct hci_conn *conn;
3564 struct smp_ltk *ltk;
3565
3566 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3567
3568 hci_dev_lock(hdev);
3569
3570 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3571 if (conn == NULL)
3572 goto not_found;
3573
3574 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3575 if (ltk == NULL)
3576 goto not_found;
3577
3578 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3579 cp.handle = cpu_to_le16(conn->handle);
3580
3581 if (ltk->authenticated)
3582 conn->pending_sec_level = BT_SECURITY_HIGH;
3583 else
3584 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3585
3586 conn->enc_key_size = ltk->enc_size;
3587
3588 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3589
3590 if (ltk->type & HCI_SMP_STK) {
3591 list_del(&ltk->list);
3592 kfree(ltk);
3593 }
3594
3595 hci_dev_unlock(hdev);
3596
3597 return;
3598
3599 not_found:
3600 neg.handle = ev->handle;
3601 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3602 hci_dev_unlock(hdev);
3603 }
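/* A minimal, standalone sketch (plain C, not kernel code) of the LTK
 * Request handling above: the controller asks for the key matching a given
 * EDIV/Rand pair; the host replies with the key if it has one and sends a
 * negative reply otherwise. Short-term keys (STKs) appear to be valid for a
 * single session only, which is why the handler deletes them once used.
 * The types and names below are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct example_ltk {
	uint16_t ediv;
	uint64_t rand;
	uint8_t val[16];
	bool authenticated;	/* selects HIGH vs MEDIUM security */
	bool is_stk;		/* short-term key: single use */
	bool in_use;		/* stands in for presence in the key store */
};

/* Returns true if a reply with key material should be sent, false if a
 * negative reply is appropriate.
 */
static bool example_handle_ltk_request(struct example_ltk *store, size_t n,
				       uint16_t ediv, uint64_t rand,
				       uint8_t out_key[16])
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct example_ltk *k = &store[i];

		if (!k->in_use || k->ediv != ediv || k->rand != rand)
			continue;

		memcpy(out_key, k->val, 16);

		if (k->is_stk)
			k->in_use = false;	/* drop STK after first use */

		return true;
	}

	return false;
}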
3604
3605 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3606 {
3607 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3608
3609 skb_pull(skb, sizeof(*le_ev));
3610
3611 switch (le_ev->subevent) {
3612 case HCI_EV_LE_CONN_COMPLETE:
3613 hci_le_conn_complete_evt(hdev, skb);
3614 break;
3615
3616 case HCI_EV_LE_ADVERTISING_REPORT:
3617 hci_le_adv_report_evt(hdev, skb);
3618 break;
3619
3620 case HCI_EV_LE_LTK_REQ:
3621 hci_le_ltk_request_evt(hdev, skb);
3622 break;
3623
3624 default:
3625 break;
3626 }
3627 }
3628
3629 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3630 {
3631 struct hci_ev_channel_selected *ev = (void *) skb->data;
3632 struct hci_conn *hcon;
3633
3634 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3635
3636 skb_pull(skb, sizeof(*ev));
3637
3638 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3639 if (!hcon)
3640 return;
3641
3642 amp_read_loc_assoc_final_data(hdev, hcon);
3643 }
3644
3645 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3646 {
3647 struct hci_event_hdr *hdr = (void *) skb->data;
3648 __u8 event = hdr->evt;
3649
3650 hci_dev_lock(hdev);
3651
3652 /* Received events are (currently) only needed when a request is
3653 * ongoing, so avoid unnecessary memory allocation.
3654 */
3655 if (hdev->req_status == HCI_REQ_PEND) {
3656 kfree_skb(hdev->recv_evt);
3657 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3658 }
3659
3660 hci_dev_unlock(hdev);
3661
3662 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3663
3664 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3665 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3666 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3667
3668 hci_req_cmd_complete(hdev, opcode, 0);
3669 }
3670
3671 switch (event) {
3672 case HCI_EV_INQUIRY_COMPLETE:
3673 hci_inquiry_complete_evt(hdev, skb);
3674 break;
3675
3676 case HCI_EV_INQUIRY_RESULT:
3677 hci_inquiry_result_evt(hdev, skb);
3678 break;
3679
3680 case HCI_EV_CONN_COMPLETE:
3681 hci_conn_complete_evt(hdev, skb);
3682 break;
3683
3684 case HCI_EV_CONN_REQUEST:
3685 hci_conn_request_evt(hdev, skb);
3686 break;
3687
3688 case HCI_EV_DISCONN_COMPLETE:
3689 hci_disconn_complete_evt(hdev, skb);
3690 break;
3691
3692 case HCI_EV_AUTH_COMPLETE:
3693 hci_auth_complete_evt(hdev, skb);
3694 break;
3695
3696 case HCI_EV_REMOTE_NAME:
3697 hci_remote_name_evt(hdev, skb);
3698 break;
3699
3700 case HCI_EV_ENCRYPT_CHANGE:
3701 hci_encrypt_change_evt(hdev, skb);
3702 break;
3703
3704 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3705 hci_change_link_key_complete_evt(hdev, skb);
3706 break;
3707
3708 case HCI_EV_REMOTE_FEATURES:
3709 hci_remote_features_evt(hdev, skb);
3710 break;
3711
3712 case HCI_EV_CMD_COMPLETE:
3713 hci_cmd_complete_evt(hdev, skb);
3714 break;
3715
3716 case HCI_EV_CMD_STATUS:
3717 hci_cmd_status_evt(hdev, skb);
3718 break;
3719
3720 case HCI_EV_ROLE_CHANGE:
3721 hci_role_change_evt(hdev, skb);
3722 break;
3723
3724 case HCI_EV_NUM_COMP_PKTS:
3725 hci_num_comp_pkts_evt(hdev, skb);
3726 break;
3727
3728 case HCI_EV_MODE_CHANGE:
3729 hci_mode_change_evt(hdev, skb);
3730 break;
3731
3732 case HCI_EV_PIN_CODE_REQ:
3733 hci_pin_code_request_evt(hdev, skb);
3734 break;
3735
3736 case HCI_EV_LINK_KEY_REQ:
3737 hci_link_key_request_evt(hdev, skb);
3738 break;
3739
3740 case HCI_EV_LINK_KEY_NOTIFY:
3741 hci_link_key_notify_evt(hdev, skb);
3742 break;
3743
3744 case HCI_EV_CLOCK_OFFSET:
3745 hci_clock_offset_evt(hdev, skb);
3746 break;
3747
3748 case HCI_EV_PKT_TYPE_CHANGE:
3749 hci_pkt_type_change_evt(hdev, skb);
3750 break;
3751
3752 case HCI_EV_PSCAN_REP_MODE:
3753 hci_pscan_rep_mode_evt(hdev, skb);
3754 break;
3755
3756 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3757 hci_inquiry_result_with_rssi_evt(hdev, skb);
3758 break;
3759
3760 case HCI_EV_REMOTE_EXT_FEATURES:
3761 hci_remote_ext_features_evt(hdev, skb);
3762 break;
3763
3764 case HCI_EV_SYNC_CONN_COMPLETE:
3765 hci_sync_conn_complete_evt(hdev, skb);
3766 break;
3767
3768 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3769 hci_extended_inquiry_result_evt(hdev, skb);
3770 break;
3771
3772 case HCI_EV_KEY_REFRESH_COMPLETE:
3773 hci_key_refresh_complete_evt(hdev, skb);
3774 break;
3775
3776 case HCI_EV_IO_CAPA_REQUEST:
3777 hci_io_capa_request_evt(hdev, skb);
3778 break;
3779
3780 case HCI_EV_IO_CAPA_REPLY:
3781 hci_io_capa_reply_evt(hdev, skb);
3782 break;
3783
3784 case HCI_EV_USER_CONFIRM_REQUEST:
3785 hci_user_confirm_request_evt(hdev, skb);
3786 break;
3787
3788 case HCI_EV_USER_PASSKEY_REQUEST:
3789 hci_user_passkey_request_evt(hdev, skb);
3790 break;
3791
3792 case HCI_EV_USER_PASSKEY_NOTIFY:
3793 hci_user_passkey_notify_evt(hdev, skb);
3794 break;
3795
3796 case HCI_EV_KEYPRESS_NOTIFY:
3797 hci_keypress_notify_evt(hdev, skb);
3798 break;
3799
3800 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3801 hci_simple_pair_complete_evt(hdev, skb);
3802 break;
3803
3804 case HCI_EV_REMOTE_HOST_FEATURES:
3805 hci_remote_host_features_evt(hdev, skb);
3806 break;
3807
3808 case HCI_EV_LE_META:
3809 hci_le_meta_evt(hdev, skb);
3810 break;
3811
3812 case HCI_EV_CHANNEL_SELECTED:
3813 hci_chan_selected_evt(hdev, skb);
3814 break;
3815
3816 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3817 hci_remote_oob_data_request_evt(hdev, skb);
3818 break;
3819
3820 case HCI_EV_PHY_LINK_COMPLETE:
3821 hci_phy_link_complete_evt(hdev, skb);
3822 break;
3823
3824 case HCI_EV_LOGICAL_LINK_COMPLETE:
3825 hci_loglink_complete_evt(hdev, skb);
3826 break;
3827
3828 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3829 hci_disconn_loglink_complete_evt(hdev, skb);
3830 break;
3831
3832 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3833 hci_disconn_phylink_complete_evt(hdev, skb);
3834 break;
3835
3836 case HCI_EV_NUM_COMP_BLOCKS:
3837 hci_num_comp_blocks_evt(hdev, skb);
3838 break;
3839
3840 default:
3841 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3842 break;
3843 }
3844
3845 kfree_skb(skb);
3846 hdev->stat.evt_rx++;
3847 }